diff --git a/go.mod b/go.mod index fb8ac9ee8e4c..5697159768c7 100644 --- a/go.mod +++ b/go.mod @@ -103,22 +103,22 @@ require ( gopkg.in/src-d/go-git.v4 v4.13.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.33.2 - k8s.io/apiextensions-apiserver v0.33.2 - k8s.io/apimachinery v0.33.2 - k8s.io/apiserver v0.33.2 - k8s.io/cli-runtime v0.33.2 - k8s.io/client-go v0.33.2 - k8s.io/component-base v0.33.2 - k8s.io/component-helpers v0.33.2 + k8s.io/api v0.33.4 + k8s.io/apiextensions-apiserver v0.33.4 + k8s.io/apimachinery v0.33.4 + k8s.io/apiserver v0.33.4 + k8s.io/cli-runtime v0.33.4 + k8s.io/client-go v0.33.4 + k8s.io/component-base v0.33.4 + k8s.io/component-helpers v0.33.4 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.130.1 - k8s.io/kube-aggregator v0.33.2 + k8s.io/kube-aggregator v0.33.4 k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff - k8s.io/kubectl v0.33.2 - k8s.io/kubernetes v1.33.2 - k8s.io/pod-security-admission v0.33.2 - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + k8s.io/kubectl v0.33.4 + k8s.io/kubernetes v1.33.4 + k8s.io/pod-security-admission v0.33.4 + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d sigs.k8s.io/cloud-provider-azure v1.30.4 sigs.k8s.io/gateway-api v1.2.1 sigs.k8s.io/kustomize/kyaml v0.19.0 @@ -414,35 +414,35 @@ require ( replace ( github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20250416174521-4eb003743b54 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 - k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250716113245-b94367cabf3e - k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250716113245-b94367cabf3e - k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250716113245-b94367cabf3e - k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250716113245-b94367cabf3e - k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250716113245-b94367cabf3e - k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250716113245-b94367cabf3e - k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250716113245-b94367cabf3e - k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250716113245-b94367cabf3e - k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20250716113245-b94367cabf3e - k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250716113245-b94367cabf3e - k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250716113245-b94367cabf3e - k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250716113245-b94367cabf3e - k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250716113245-b94367cabf3e - k8s.io/cri-client => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250716113245-b94367cabf3e - k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250716113245-b94367cabf3e - k8s.io/dynamic-resource-allocation => 
github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250716113245-b94367cabf3e - k8s.io/endpointslice => github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250716113245-b94367cabf3e - k8s.io/externaljwt => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250716113245-b94367cabf3e - k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250716113245-b94367cabf3e - k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20250716113245-b94367cabf3e - k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20250716113245-b94367cabf3e - k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250716113245-b94367cabf3e - k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250716113245-b94367cabf3e - k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250716113245-b94367cabf3e - k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20250815165952-eba09d2066a6 - k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20250716113245-b94367cabf3e - k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250716113245-b94367cabf3e - k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250716113245-b94367cabf3e - k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250716113245-b94367cabf3e - k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250716113245-b94367cabf3e - k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250716113245-b94367cabf3e + k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250906192346-6efb6a95323f + k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250906192346-6efb6a95323f + k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250906192346-6efb6a95323f + k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250906192346-6efb6a95323f + k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250906192346-6efb6a95323f + k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250906192346-6efb6a95323f + k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250906192346-6efb6a95323f + k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250906192346-6efb6a95323f + k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20250906192346-6efb6a95323f + k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250906192346-6efb6a95323f + k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250906192346-6efb6a95323f + k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250906192346-6efb6a95323f + k8s.io/cri-api => 
github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250906192346-6efb6a95323f + k8s.io/cri-client => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250906192346-6efb6a95323f + k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250906192346-6efb6a95323f + k8s.io/dynamic-resource-allocation => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250906192346-6efb6a95323f + k8s.io/endpointslice => github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250906192346-6efb6a95323f + k8s.io/externaljwt => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250906192346-6efb6a95323f + k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250906192346-6efb6a95323f + k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20250906192346-6efb6a95323f + k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20250906192346-6efb6a95323f + k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250906192346-6efb6a95323f + k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250906192346-6efb6a95323f + k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250906192346-6efb6a95323f + k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20250906192346-6efb6a95323f + k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20250906192346-6efb6a95323f + k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250906192346-6efb6a95323f + k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250906192346-6efb6a95323f + k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250906192346-6efb6a95323f + k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250906192346-6efb6a95323f + k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250906192346-6efb6a95323f ) diff --git a/go.sum b/go.sum index 6a7e79b784c9..3d92ad8d0d62 100644 --- a/go.sum +++ b/go.sum @@ -825,56 +825,56 @@ github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee h1:tOtrrxfDEW8 github.com/openshift/client-go v0.0.0-20250710075018-396b36f983ee/go.mod h1:zhRiYyNMk89llof2qEuGPWPD+joQPhCRUc2IK0SB510= github.com/openshift/cluster-network-operator v0.0.0-20240708200319-1cd8678b38fb h1:Dr0dbSQTAU9UaoAvimGjR+fsvwx2twJ5KR0s/jyAz88= github.com/openshift/cluster-network-operator v0.0.0-20240708200319-1cd8678b38fb/go.mod h1:LnhqxbWhAnhPwilJ4yX1/ly7wCMCYJKkaiSJQSh+Wjg= -github.com/openshift/kubernetes v1.30.1-0.20250815165952-eba09d2066a6 h1:HSwP3P0+rNG0yZ6t1T8xU5kXcdA5ohzcxfWjwh4Fxuk= -github.com/openshift/kubernetes v1.30.1-0.20250815165952-eba09d2066a6/go.mod h1:GwUMe2E0Dqe2YN/Nkg9QWNBktqiTR7y+HFxcIWKshXI= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250716113245-b94367cabf3e h1:Y70IDoOnCCKQT4lIJxx2KkTifLuqD/vjRrzo1DxZ/iw= -github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250716113245-b94367cabf3e/go.mod h1:swyY9Vxfl/p3AXSU4Q/mKx42sj30InC4qeKgvc5fZio= 
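Reviewer note: the `replace` block above moves every `k8s.io/*` staging module to the same fork pseudo-version, `v0.0.0-20250906192346-6efb6a95323f` (commit `6efb6a95323f`), while the root `k8s.io/kubernetes` replace carries the matching `v1.30.1-0.20250906192346-6efb6a95323f`. Keeping all staging modules on a single commit is the invariant that keeps the fork's cross-module types compatible. A hypothetical consistency check (not part of this PR), assuming `golang.org/x/mod` is available:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/mod/modfile"
)

// Verifies that every k8s.io/* -> openshift/kubernetes/staging replace in
// go.mod is pinned to one pseudo-version, the invariant the block above keeps.
func main() {
	data, err := os.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}
	versions := map[string][]string{}
	for _, r := range f.Replace {
		if strings.HasPrefix(r.Old.Path, "k8s.io/") &&
			strings.HasPrefix(r.New.Path, "github.com/openshift/kubernetes/staging/") {
			versions[r.New.Version] = append(versions[r.New.Version], r.Old.Path)
		}
	}
	if len(versions) > 1 {
		fmt.Println("staging replaces are not pinned to a single commit:")
		for v, mods := range versions {
			fmt.Printf("  %s -> %d modules\n", v, len(mods))
		}
		os.Exit(1)
	}
}
```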
-github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250716113245-b94367cabf3e h1:8Zix9zo3XUC6pH/jNGpOf7X0+FcRUuDh3mR3ymML8FA= -github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250716113245-b94367cabf3e/go.mod h1:/CVw9MmbDdZ4GBOf9bqMYDrIc0qJd0a4Qu84V8KXZTw= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250716113245-b94367cabf3e h1:sQkj6PipKz/Y+3dZuQQh0d9E9GREi+QSaveMYVDJuwA= -github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250716113245-b94367cabf3e/go.mod h1:n6CqSdJo6YPuehXUA0UiMbsdzFjYhILDJPx1NTD1HXM= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250716113245-b94367cabf3e h1:tRIbaeFxBRhSEniFS0N2fV36K9iByatvNSwMvNpzteY= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250716113245-b94367cabf3e/go.mod h1:i9gqMWgKZrfRCfy7vWqhX6rtjhI2zC7DW1VdzWeJMl0= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250716113245-b94367cabf3e h1:k0o/ukWgLcHK8ezn3k1RM1R2myqelmLuwMixl0XrJmo= -github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250716113245-b94367cabf3e/go.mod h1:Bnv2weyfmuGaz0JlHiBUycneV4qsVmDVDKU5deHcwK8= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250716113245-b94367cabf3e h1:8lSjcLUtudojAFXr8SzreSehOAJIyjY/5LJ6D6GgRVk= -github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250716113245-b94367cabf3e/go.mod h1:sfyX6/LGAHhY0eEROpZebPNQEKdtJasfrFDAsiAgbTY= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250716113245-b94367cabf3e h1:8mfFUpwj9dp6BvY2Zus7NXfaVUC6TCZuhRBW66/3c/U= -github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250716113245-b94367cabf3e/go.mod h1:3sPTciVJ3U8sLYUw7dZ+OKS/kkgFrScb6++dGoQjUk8= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250716113245-b94367cabf3e h1:xWXa+MVvDs/DYhspithzt6IveZuCFU0jav7o0bR4jow= -github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250716113245-b94367cabf3e/go.mod h1:gTgBK2hE+UG/v9/ndhCSH7lwiXU0/+PN74iAAULIOB8= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250716113245-b94367cabf3e h1:EjEv02SdQYBNb83tiG8MsuAHvT62k5ltrbv2exY3SK4= -github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250716113245-b94367cabf3e/go.mod h1:bzV+wZXklQw0LPrOwZB0FS4d6t+tAP0pHtaFZ3Owyak= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250716113245-b94367cabf3e h1:iA14GVhVbvR8uSez7zo7u/ugxTQ9F5bzeZNvq2e4RBQ= -github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250716113245-b94367cabf3e/go.mod h1:WbHLQQJcuXJ+ouL8pUYJ/AXSemqMLMYN9Fzp4mMrXqA= -github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250716113245-b94367cabf3e h1:jCtuSmEVgAG5/QkhJYVnQRrEfM2dyg8yNtrqJr4l6dw= -github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250716113245-b94367cabf3e/go.mod h1:qT8AIIsYGJw7HOIWKzkSChjKDltacCzlJ1o0GrKOdB8= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250716113245-b94367cabf3e h1:oLashy+jc7Us36R5atGzTIqS5S6bhbGLlihpHrE4njg= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250716113245-b94367cabf3e/go.mod h1:PIJuhkLPltzm2tNVyE1vx6+nEj93cGNiCVWcCsiIWLM= -github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250716113245-b94367cabf3e h1:kihOU9BOdshIy6paKgD+aOoFoctCDzu1cmA2fU0OcpQ= 
-github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250716113245-b94367cabf3e/go.mod h1:Rkl5u/bFtxOXPaXQNkvjLFNxgV1CUZKCoUO3bb9rIpg= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250716113245-b94367cabf3e h1:uRJvfVVTouQUwYB8X07X4gMoTEbrFbuQ39z/jVDRSVY= -github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250716113245-b94367cabf3e/go.mod h1:2IqgnZC3uigqTQBmxX3AOCCeDdbPi4YtBMQSzXeNpRs= -github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250716113245-b94367cabf3e h1:s+po29QDjU4i7pYsgc0WhC/YOBz8S7D4Jc8g9utD+i0= -github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250716113245-b94367cabf3e/go.mod h1:tqpw7neD7QyvzzMBeABcz4ig5vrpdUksU1Zfc7Sd1u0= -github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250716113245-b94367cabf3e h1:/ATf+ZOKmWcQCgrj9U127xYmWLZqDScVtQFzaF1EFFo= -github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250716113245-b94367cabf3e/go.mod h1:N1orv1N7g3K12OIjUn1VQttmn+6vPzI0nqBISkXKBqw= -github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250716113245-b94367cabf3e h1:erAHrTEKMqASIREV/xZSlcaDV3MfHNCqrAM5ZjmYHiw= -github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250716113245-b94367cabf3e/go.mod h1:vTm400QuF+CUmrDNRkVpAACCBInj+1o5egaf7k61P8Y= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250716113245-b94367cabf3e h1:wudLWBCJNh6j+10GHBFl7hNSTHX1ZQAftvVArKroDvc= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250716113245-b94367cabf3e/go.mod h1:OVuOGQiTtsXkjKTt+welRKzNen9rKI3QgdUisssFj9g= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250716113245-b94367cabf3e h1:7vxb3TiU9qub9ALMjU14hVOwHMDdCpRy0SOjLdYOuiE= -github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250716113245-b94367cabf3e/go.mod h1:pKF40Xu1S/vLRUeJx/S5zAN1/MNZuX++F03fieGhxyE= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250716113245-b94367cabf3e h1:QAs/ekoIQ+t+Qs2xTVNnTxtb2I8mlm0YJVuTQFsDtfs= -github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250716113245-b94367cabf3e/go.mod h1:TMRqDuRy4rnmQP2uKXYFMkR4BzJQR6rLFCsvgmHRqtg= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250716113245-b94367cabf3e h1:uQ6AxOOfDYupJyLibITpw0F1NQ/0ME536AfWfpwIXds= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250716113245-b94367cabf3e/go.mod h1:ATJkUseAPJFZaRznnJeKqt+8ZCm0nVACjxyJXDapwPQ= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250716113245-b94367cabf3e h1:Nq6+AX4YsLuZpGoKqC9N0megcLpfF+8DZlKNS1s9y8g= -github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250716113245-b94367cabf3e/go.mod h1:8DQjgRb85tV8xSqqm1PzGxz1w59z7UieC4GPk/cRKY0= -github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250716113245-b94367cabf3e h1:4S3mkysDZXRF0yR1hrviYnI4gS96fd87sG+3DeUAAHs= -github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250716113245-b94367cabf3e/go.mod h1:+UmuDIUnxxGlHHQvFhdg4s1XMRX+MBU1n70IYo18IOk= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250716113245-b94367cabf3e h1:edNgRWGEdZculmiISrUvYO0kORmedM1TYc3us0Zvh00= -github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250716113245-b94367cabf3e/go.mod h1:umdoAPEnjFAF+Lrk8/wMGbxUAjRvgsC+szWYUHluvqs= 
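Each bumped module swaps a pair of `go.sum` lines: an `h1:` hash over the module's extracted file tree, plus a `/go.mod h1:` hash over its `go.mod` file alone — which is why many `/go.mod` hashes here survive the bump unchanged while the tree hashes differ. A sketch of how the `h1:` values are derived, using `golang.org/x/mod/sumdb/dirhash` (the local path and module string below are hypothetical):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Hash1 is the algorithm behind the "h1:" prefix: a SHA-256 over a sorted
	// manifest of per-file SHA-256 sums. HashDir applies it to a module tree.
	h, err := dirhash.HashDir(
		"/path/to/extracted/module", // hypothetical download location
		"github.com/openshift/kubernetes/staging/src/k8s.io/api@v0.0.0-20250906192346-6efb6a95323f",
		dirhash.Hash1,
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(h) // prints an "h1:..." string comparable to the go.sum entries
}
```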
+github.com/openshift/kubernetes v1.30.1-0.20250906192346-6efb6a95323f h1:Wedy8leKvM/Ry4Wssfl1qQdyWsw5PW7+2ExHmF0k3cI= +github.com/openshift/kubernetes v1.30.1-0.20250906192346-6efb6a95323f/go.mod h1:GwUMe2E0Dqe2YN/Nkg9QWNBktqiTR7y+HFxcIWKshXI= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250906192346-6efb6a95323f h1:EJoUgXhGoIigpvlkXE4gsdXsSQo8n0B5rSO95B4zVrQ= +github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250906192346-6efb6a95323f/go.mod h1:swyY9Vxfl/p3AXSU4Q/mKx42sj30InC4qeKgvc5fZio= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250906192346-6efb6a95323f h1:D419wwC3ybVot8ArUQ7sjndlBxYywsFkN9k4hAQps/k= +github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250906192346-6efb6a95323f/go.mod h1:/CVw9MmbDdZ4GBOf9bqMYDrIc0qJd0a4Qu84V8KXZTw= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250906192346-6efb6a95323f h1:MKxU/gF5qAVP8Es6JJ9n9yonmYgjH8j3FFnuuJagIz4= +github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250906192346-6efb6a95323f/go.mod h1:n6CqSdJo6YPuehXUA0UiMbsdzFjYhILDJPx1NTD1HXM= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250906192346-6efb6a95323f h1:oabpOhSksDJKKrZ1BfN2yzYFRxB9VZr0oAW9F19DP6Q= +github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250906192346-6efb6a95323f/go.mod h1:i9gqMWgKZrfRCfy7vWqhX6rtjhI2zC7DW1VdzWeJMl0= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250906192346-6efb6a95323f h1:g5xTJ5jXaiKAq4BneP949N4m6XF1jubiZfd8Oc03r7E= +github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250906192346-6efb6a95323f/go.mod h1:Bnv2weyfmuGaz0JlHiBUycneV4qsVmDVDKU5deHcwK8= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250906192346-6efb6a95323f h1:SrE3wAb+hYgaps2bGHMv2H2gmz0zGpb+bLUUUEigOGg= +github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250906192346-6efb6a95323f/go.mod h1:sfyX6/LGAHhY0eEROpZebPNQEKdtJasfrFDAsiAgbTY= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250906192346-6efb6a95323f h1:9zjqiHP8yiND+7B9OWZ9DoG7WqACBjRqTHdEQwdyUD0= +github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250906192346-6efb6a95323f/go.mod h1:3sPTciVJ3U8sLYUw7dZ+OKS/kkgFrScb6++dGoQjUk8= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250906192346-6efb6a95323f h1:Jp/33Rd3Tl5rXnqREmd8qY407Awxh0JVkipL5r5/H4s= +github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250906192346-6efb6a95323f/go.mod h1:gTgBK2hE+UG/v9/ndhCSH7lwiXU0/+PN74iAAULIOB8= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250906192346-6efb6a95323f h1:F62pP32ysLXndPWMO/tlfI8shgR/waP+vlezi5phZwY= +github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250906192346-6efb6a95323f/go.mod h1:bzV+wZXklQw0LPrOwZB0FS4d6t+tAP0pHtaFZ3Owyak= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250906192346-6efb6a95323f h1:4CmtuArog0SzBTe4y1lAAJAew8gefu/ggyJbo7pKZtM= +github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250906192346-6efb6a95323f/go.mod h1:WbHLQQJcuXJ+ouL8pUYJ/AXSemqMLMYN9Fzp4mMrXqA= +github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250906192346-6efb6a95323f h1:3e9rTfAwj1K9BYa4GK0Uk0/7qqrEj0ld2QiXuwA9gtg= +github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager 
v0.0.0-20250906192346-6efb6a95323f/go.mod h1:qT8AIIsYGJw7HOIWKzkSChjKDltacCzlJ1o0GrKOdB8= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250906192346-6efb6a95323f h1:wnL1TmvNCu94IW++NBFHCHPfMNUtZCw9pXzTKuB0y4I= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250906192346-6efb6a95323f/go.mod h1:PIJuhkLPltzm2tNVyE1vx6+nEj93cGNiCVWcCsiIWLM= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250906192346-6efb6a95323f h1:nCIb9cw63FlfhPcZssqdJspBk0ZqKVZYFrC4huxVvPw= +github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250906192346-6efb6a95323f/go.mod h1:Rkl5u/bFtxOXPaXQNkvjLFNxgV1CUZKCoUO3bb9rIpg= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250906192346-6efb6a95323f h1:tewl8rh98o+J+lkOdoazAZ+2NjAXYiVIQXgv+DbwZoA= +github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250906192346-6efb6a95323f/go.mod h1:2IqgnZC3uigqTQBmxX3AOCCeDdbPi4YtBMQSzXeNpRs= +github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250906192346-6efb6a95323f h1:5li/56d3gCersTMFnE5eq16RF3rpw8YwwfgrAfycUJg= +github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250906192346-6efb6a95323f/go.mod h1:tqpw7neD7QyvzzMBeABcz4ig5vrpdUksU1Zfc7Sd1u0= +github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250906192346-6efb6a95323f h1:eaztsAiYt3Us5PGUgDcsy0Th2+h/Sz9LfJhe0nvRjCE= +github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250906192346-6efb6a95323f/go.mod h1:N1orv1N7g3K12OIjUn1VQttmn+6vPzI0nqBISkXKBqw= +github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250906192346-6efb6a95323f h1:9/4WlS/1P+5iqqmtO7MB9507vkMVMzL8AkZx1LfKGEI= +github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250906192346-6efb6a95323f/go.mod h1:vTm400QuF+CUmrDNRkVpAACCBInj+1o5egaf7k61P8Y= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250906192346-6efb6a95323f h1:EoHUMsL4doI+9yMcAJaY2RbLNKr1N8U4OnaWbXUclkM= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250906192346-6efb6a95323f/go.mod h1:OVuOGQiTtsXkjKTt+welRKzNen9rKI3QgdUisssFj9g= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250906192346-6efb6a95323f h1:FM0TcvxvQ99Vr4aYXVpp/R8glK2igtRPr0pyziYQTkc= +github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250906192346-6efb6a95323f/go.mod h1:pKF40Xu1S/vLRUeJx/S5zAN1/MNZuX++F03fieGhxyE= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250906192346-6efb6a95323f h1:Ueyh8QNSWZ1ydm1H584U0DPW8h9dyHg0JNFiElqb+DA= +github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250906192346-6efb6a95323f/go.mod h1:TMRqDuRy4rnmQP2uKXYFMkR4BzJQR6rLFCsvgmHRqtg= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250906192346-6efb6a95323f h1:uJpOhH6a/d2cLk1PumtZb3YswZbOr7odYFeuay1/GxE= +github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250906192346-6efb6a95323f/go.mod h1:ATJkUseAPJFZaRznnJeKqt+8ZCm0nVACjxyJXDapwPQ= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250906192346-6efb6a95323f h1:sh66f8O//LtxjZ0V8ScMGA+Fu3jKaP1hZdY0MW+Qot0= +github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250906192346-6efb6a95323f/go.mod h1:8DQjgRb85tV8xSqqm1PzGxz1w59z7UieC4GPk/cRKY0= +github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250906192346-6efb6a95323f 
h1:lODwdkoyvy2ko7qUHF81ANbf3mnL1yIQ+kL1mXZ9ATo= +github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250906192346-6efb6a95323f/go.mod h1:+UmuDIUnxxGlHHQvFhdg4s1XMRX+MBU1n70IYo18IOk= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250906192346-6efb6a95323f h1:jIDAd7DCzt0HvUqD1ZffC1BwnRup9fmPLMZdbtDPZpY= +github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250906192346-6efb6a95323f/go.mod h1:umdoAPEnjFAF+Lrk8/wMGbxUAjRvgsC+szWYUHluvqs= github.com/openshift/library-go v0.0.0-20250812160438-378de074fe7b h1:AvoeP4LZgeHXTeNO7HiSdIxPbYrKvpJFa1JNTiYrx8M= github.com/openshift/library-go v0.0.0-20250812160438-378de074fe7b/go.mod h1:tptKNust9MdRI0p90DoBSPHIrBa9oh+Rok59tF0vT8c= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20250416174521-4eb003743b54 h1:ehXndVZfIk/fo18YJCMJ+6b8HL8tzqjP7yWgchMnfCc= @@ -1593,8 +1593,8 @@ k8s.io/kms v0.33.2 h1:GFwNXX4CZGQCg9DPOaJi1/+iKidCtB9/OIAGdzRo8FI= k8s.io/kms v0.33.2/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/images/tests/Dockerfile.rhel b/images/tests/Dockerfile.rhel index fcb3198723f3..c6b972406039 100644 --- a/images/tests/Dockerfile.rhel +++ b/images/tests/Dockerfile.rhel @@ -22,5 +22,5 @@ RUN PACKAGES="git gzip util-linux" && \ LABEL io.k8s.display-name="OpenShift End-to-End Tests" \ io.openshift.release.operator=true \ io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." 
\ - io.openshift.build.versions="kubernetes-tests=1.33.3" \ + io.openshift.build.versions="kubernetes-tests=1.33.4" \ io.openshift.tags="openshift,tests,e2e" diff --git a/test/extended/util/image/zz_generated.txt b/test/extended/util/image/zz_generated.txt index b99bb9e223f6..e49677857ced 100644 --- a/test/extended/util/image/zz_generated.txt +++ b/test/extended/util/image/zz_generated.txt @@ -6,7 +6,7 @@ quay.io/openshifttest/multicast:1.1 quay.io/openshift/community-e2e-images:e2e-q quay.io/redhat-developer/nfs-server:1.1 quay.io/openshift/community-e2e-images:e2e-quay-io-redhat-developer-nfs-server-1-1-dlXGfzrk5aNo8EjC quay.io/redhat-developer/test-build-roots2i:1.2 quay.io/openshift/community-e2e-images:e2e-quay-io-redhat-developer-test-build-roots2i-1-2-gLJ7WcnS2TSllJ32 quay.io/redhat-developer/test-build-simples2i:1.2 quay.io/openshift/community-e2e-images:e2e-quay-io-redhat-developer-test-build-simples2i-1-2-thirLMR-JKplfkmE -registry.k8s.io/build-image/distroless-iptables:v0.7.6 quay.io/openshift/community-e2e-images:e2e-8-registry-k8s-io-build-image-distroless-iptables-v0-7-6-q9JnbCuPRt2Q6Cyz +registry.k8s.io/build-image/distroless-iptables:v0.7.7 quay.io/openshift/community-e2e-images:e2e-8-registry-k8s-io-build-image-distroless-iptables-v0-7-7-RnQLl7s9Ix-ryHiD registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.4.0 quay.io/openshift/community-e2e-images:e2e-42-registry-k8s-io-cloud-provider-gcp-gcp-compute-persistent-disk-csi-driver-v1-4-0-mUHHjVVuv0UQiTyf registry.k8s.io/e2e-test-images/agnhost:2.53 quay.io/openshift/community-e2e-images:e2e-1-registry-k8s-io-e2e-test-images-agnhost-2-53-S5hiptYgC5MyFXZH registry.k8s.io/e2e-test-images/apparmor-loader:1.4 quay.io/openshift/community-e2e-images:e2e-4-registry-k8s-io-e2e-test-images-apparmor-loader-1-4-m-K7F-syWFeA4t03 diff --git a/vendor/k8s.io/apiserver/pkg/admission/audit.go b/vendor/k8s.io/apiserver/pkg/admission/audit.go index 7c0993f0908f..f9f90cd02475 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/audit.go +++ b/vendor/k8s.io/apiserver/pkg/admission/audit.go @@ -83,7 +83,7 @@ func ensureAnnotationGetter(a Attributes) error { } func (handler *auditHandler) logAnnotations(ctx context.Context, a Attributes) { - ae := audit.AuditEventFrom(ctx) + ae := audit.AuditContextFrom(ctx) if ae == nil { return } @@ -91,9 +91,9 @@ func (handler *auditHandler) logAnnotations(ctx context.Context, a Attributes) { var annotations map[string]string switch a := a.(type) { case privateAnnotationsGetter: - annotations = a.getAnnotations(ae.Level) + annotations = a.getAnnotations(ae.GetEventLevel()) case AnnotationsGetter: - annotations = a.GetAnnotations(ae.Level) + annotations = a.GetAnnotations(ae.GetEventLevel()) default: // this will never happen, because we have already checked it in ensureAnnotationGetter } diff --git a/vendor/k8s.io/apiserver/pkg/audit/context.go b/vendor/k8s.io/apiserver/pkg/audit/context.go index 9648587378ec..5b93d594bffa 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/context.go +++ b/vendor/k8s.io/apiserver/pkg/audit/context.go @@ -18,10 +18,18 @@ package audit import ( "context" + "errors" + "maps" "sync" + "sync/atomic" + "time" + authnv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authentication/user" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/klog/v2" ) @@ -35,22 +43,223 @@ const auditKey 
key = iota // AuditContext holds the information for constructing the audit events for the current request. type AuditContext struct { - // RequestAuditConfig is the audit configuration that applies to the request - RequestAuditConfig RequestAuditConfig - - // Event is the audit Event object that is being captured to be written in + // initialized indicates whether requestAuditConfig and sink have been populated and are safe to read unguarded. + // This should only be set via Init(). + initialized atomic.Bool + // requestAuditConfig is the audit configuration that applies to the request. + // This should only be written via Init(RequestAuditConfig, Sink), and only read when initialized.Load() is true. + requestAuditConfig RequestAuditConfig + // sink is the sink to use when processing event stages. + // This should only be written via Init(RequestAuditConfig, Sink), and only read when initialized.Load() is true. + sink Sink + + // lock guards event + lock sync.Mutex + + // event is the audit Event object that is being captured to be written in // the API audit log. - Event auditinternal.Event + event auditinternal.Event - // annotationMutex guards event.Annotations - annotationMutex sync.Mutex + // unguarded copy of auditID from the event + auditID atomic.Value } // Enabled checks whether auditing is enabled for this audit context. func (ac *AuditContext) Enabled() bool { - // Note: An unset Level should be considered Enabled, so that request data (e.g. annotations) - // can still be captured before the audit policy is evaluated. - return ac != nil && ac.RequestAuditConfig.Level != auditinternal.LevelNone + if ac == nil { + // protect against nil pointers + return false + } + if !ac.initialized.Load() { + // Note: An unset Level should be considered Enabled, so that request data (e.g. annotations) + // can still be captured before the audit policy is evaluated. + return true + } + return ac.requestAuditConfig.Level != auditinternal.LevelNone +} + +func (ac *AuditContext) Init(requestAuditConfig RequestAuditConfig, sink Sink) error { + ac.lock.Lock() + defer ac.lock.Unlock() + if ac.initialized.Load() { + return errors.New("audit context was already initialized") + } + ac.requestAuditConfig = requestAuditConfig + ac.sink = sink + ac.event.Level = requestAuditConfig.Level + ac.initialized.Store(true) + return nil +} + +func (ac *AuditContext) AuditID() types.UID { + // return the unguarded copy of the auditID + id, _ := ac.auditID.Load().(types.UID) + return id +} + +func (ac *AuditContext) visitEvent(f func(event *auditinternal.Event)) { + ac.lock.Lock() + defer ac.lock.Unlock() + f(&ac.event) +} + +// ProcessEventStage returns true on success, false if there was an error processing the stage. 
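The struct change above is the core of this backport: `Event` is no longer an exported field that handlers mutate directly. The event is now private, every read and write funnels through `visitEvent` under a single mutex, and two hot fields (`initialized`, `auditID`) are shadowed in atomics so `Enabled()` and `AuditID()` stay lock-free. A condensed, self-contained sketch of that pattern (not the vendored code itself):

```go
package auditsketch

import (
	"sync"
	"sync/atomic"
)

// event stands in for auditinternal.Event.
type event struct {
	AuditID     string
	Annotations map[string]string
}

type auditContext struct {
	lock    sync.Mutex
	event   event        // guarded by lock
	auditID atomic.Value // unguarded copy of event.AuditID for lock-free reads
}

// visitEvent is the single choke point for touching the guarded event.
func (ac *auditContext) visitEvent(f func(*event)) {
	ac.lock.Lock()
	defer ac.lock.Unlock()
	f(&ac.event)
}

// SetAuditID updates both the guarded field and its atomic shadow copy,
// inside the same critical section so the two can never disagree.
func (ac *auditContext) SetAuditID(id string) {
	ac.visitEvent(func(e *event) {
		ac.auditID.Store(id)
		e.AuditID = id
	})
}

// AuditID can be called from any goroutine without taking the lock.
func (ac *auditContext) AuditID() string {
	id, _ := ac.auditID.Load().(string)
	return id
}
```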
+func (ac *AuditContext) ProcessEventStage(ctx context.Context, stage auditinternal.Stage) bool { + if ac == nil || !ac.initialized.Load() { + return true + } + if ac.sink == nil { + return true + } + for _, omitStage := range ac.requestAuditConfig.OmitStages { + if stage == omitStage { + return true + } + } + + processed := false + ac.visitEvent(func(event *auditinternal.Event) { + event.Stage = stage + if stage == auditinternal.StageRequestReceived { + event.StageTimestamp = event.RequestReceivedTimestamp + } else { + event.StageTimestamp = metav1.NewMicroTime(time.Now()) + } + + ObserveEvent(ctx) + processed = ac.sink.ProcessEvents(event) + }) + return processed +} + +func (ac *AuditContext) LogImpersonatedUser(user user.Info) { + ac.visitEvent(func(ev *auditinternal.Event) { + if ev == nil || ev.Level.Less(auditinternal.LevelMetadata) { + return + } + ev.ImpersonatedUser = &authnv1.UserInfo{ + Username: user.GetName(), + } + ev.ImpersonatedUser.Groups = user.GetGroups() + ev.ImpersonatedUser.UID = user.GetUID() + ev.ImpersonatedUser.Extra = map[string]authnv1.ExtraValue{} + for k, v := range user.GetExtra() { + ev.ImpersonatedUser.Extra[k] = authnv1.ExtraValue(v) + } + }) +} + +func (ac *AuditContext) LogResponseObject(status *metav1.Status, obj *runtime.Unknown) { + ac.visitEvent(func(ae *auditinternal.Event) { + if status != nil { + // selectively copy the bounded fields. + ae.ResponseStatus = &metav1.Status{ + Status: status.Status, + Message: status.Message, + Reason: status.Reason, + Details: status.Details, + Code: status.Code, + } + } + if ae.Level.Less(auditinternal.LevelRequestResponse) { + return + } + ae.ResponseObject = obj + }) +} + +// LogRequestPatch fills in the given patch as the request object into an audit event. +func (ac *AuditContext) LogRequestPatch(patch []byte) { + ac.visitEvent(func(ae *auditinternal.Event) { + ae.RequestObject = &runtime.Unknown{ + Raw: patch, + ContentType: runtime.ContentTypeJSON, + } + }) +} + +func (ac *AuditContext) GetEventAnnotation(key string) (string, bool) { + var val string + var ok bool + ac.visitEvent(func(event *auditinternal.Event) { + val, ok = event.Annotations[key] + }) + return val, ok +} + +func (ac *AuditContext) GetEventLevel() auditinternal.Level { + var level auditinternal.Level + ac.visitEvent(func(event *auditinternal.Event) { + level = event.Level + }) + return level +} + +func (ac *AuditContext) SetEventStage(stage auditinternal.Stage) { + ac.visitEvent(func(event *auditinternal.Event) { + event.Stage = stage + }) +} + +func (ac *AuditContext) GetEventStage() auditinternal.Stage { + var stage auditinternal.Stage + ac.visitEvent(func(event *auditinternal.Event) { + stage = event.Stage + }) + return stage +} + +func (ac *AuditContext) SetEventStageTimestamp(timestamp metav1.MicroTime) { + ac.visitEvent(func(event *auditinternal.Event) { + event.StageTimestamp = timestamp + }) +} + +func (ac *AuditContext) GetEventResponseStatus() *metav1.Status { + var status *metav1.Status + ac.visitEvent(func(event *auditinternal.Event) { + status = event.ResponseStatus + }) + return status +} + +func (ac *AuditContext) GetEventRequestReceivedTimestamp() metav1.MicroTime { + var timestamp metav1.MicroTime + ac.visitEvent(func(event *auditinternal.Event) { + timestamp = event.RequestReceivedTimestamp + }) + return timestamp +} + +func (ac *AuditContext) GetEventStageTimestamp() metav1.MicroTime { + var timestamp metav1.MicroTime + ac.visitEvent(func(event *auditinternal.Event) { + timestamp = event.StageTimestamp + }) + return 
timestamp +} + +func (ac *AuditContext) SetEventResponseStatus(status *metav1.Status) { + ac.visitEvent(func(event *auditinternal.Event) { + event.ResponseStatus = status + }) +} + +func (ac *AuditContext) SetEventResponseStatusCode(statusCode int32) { + ac.visitEvent(func(event *auditinternal.Event) { + if event.ResponseStatus == nil { + event.ResponseStatus = &metav1.Status{} + } + event.ResponseStatus.Code = statusCode + }) +} + +func (ac *AuditContext) GetEventAnnotations() map[string]string { + var annotations map[string]string + ac.visitEvent(func(event *auditinternal.Event) { + annotations = maps.Clone(event.Annotations) + }) + return annotations } // AddAuditAnnotation sets the audit annotation for the given key, value pair. @@ -66,8 +275,8 @@ func AddAuditAnnotation(ctx context.Context, key, value string) { return } - ac.annotationMutex.Lock() - defer ac.annotationMutex.Unlock() + ac.lock.Lock() + defer ac.lock.Unlock() addAuditAnnotationLocked(ac, key, value) } @@ -81,8 +290,8 @@ func AddAuditAnnotations(ctx context.Context, keysAndValues ...string) { return } - ac.annotationMutex.Lock() - defer ac.annotationMutex.Unlock() + ac.lock.Lock() + defer ac.lock.Unlock() if len(keysAndValues)%2 != 0 { klog.Errorf("Dropping mismatched audit annotation %q", keysAndValues[len(keysAndValues)-1]) @@ -100,8 +309,8 @@ func AddAuditAnnotationsMap(ctx context.Context, annotations map[string]string) return } - ac.annotationMutex.Lock() - defer ac.annotationMutex.Unlock() + ac.lock.Lock() + defer ac.lock.Unlock() for k, v := range annotations { addAuditAnnotationLocked(ac, k, v) @@ -110,8 +319,7 @@ func AddAuditAnnotationsMap(ctx context.Context, annotations map[string]string) // addAuditAnnotationLocked records the audit annotation on the event. func addAuditAnnotationLocked(ac *AuditContext, key, value string) { - ae := &ac.Event - + ae := &ac.event if ae.Annotations == nil { ae.Annotations = make(map[string]string) } @@ -128,15 +336,11 @@ func WithAuditContext(parent context.Context) context.Context { return parent // Avoid double registering. } - return genericapirequest.WithValue(parent, auditKey, &AuditContext{}) -} - -// AuditEventFrom returns the audit event struct on the ctx -func AuditEventFrom(ctx context.Context) *auditinternal.Event { - if ac := AuditContextFrom(ctx); ac.Enabled() { - return &ac.Event - } - return nil + return genericapirequest.WithValue(parent, auditKey, &AuditContext{ + event: auditinternal.Event{ + Stage: auditinternal.StageResponseStarted, + }, + }) } // AuditContextFrom returns the pair of the audit configuration object @@ -154,7 +358,10 @@ func WithAuditID(ctx context.Context, auditID types.UID) { return } if ac := AuditContextFrom(ctx); ac != nil { - ac.Event.AuditID = auditID + ac.visitEvent(func(event *auditinternal.Event) { + ac.auditID.Store(auditID) + event.AuditID = auditID + }) } } @@ -162,7 +369,8 @@ func WithAuditID(ctx context.Context, auditID types.UID) { // auditing is enabled. 
func AuditIDFrom(ctx context.Context) (types.UID, bool) { if ac := AuditContextFrom(ctx); ac != nil { - return ac.Event.AuditID, true + id, _ := ac.auditID.Load().(types.UID) + return id, true } return "", false } diff --git a/vendor/k8s.io/apiserver/pkg/audit/request.go b/vendor/k8s.io/apiserver/pkg/audit/request.go index 9185278f06fb..d5f9c730f518 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/request.go +++ b/vendor/k8s.io/apiserver/pkg/audit/request.go @@ -40,110 +40,73 @@ const ( userAgentTruncateSuffix = "...TRUNCATED" ) -func LogRequestMetadata(ctx context.Context, req *http.Request, requestReceivedTimestamp time.Time, level auditinternal.Level, attribs authorizer.Attributes) { +func LogRequestMetadata(ctx context.Context, req *http.Request, requestReceivedTimestamp time.Time, attribs authorizer.Attributes) { ac := AuditContextFrom(ctx) if !ac.Enabled() { return } - ev := &ac.Event - - ev.RequestReceivedTimestamp = metav1.NewMicroTime(requestReceivedTimestamp) - ev.Verb = attribs.GetVerb() - ev.RequestURI = req.URL.RequestURI() - ev.UserAgent = maybeTruncateUserAgent(req) - ev.Level = level - - ips := utilnet.SourceIPs(req) - ev.SourceIPs = make([]string, len(ips)) - for i := range ips { - ev.SourceIPs[i] = ips[i].String() - } - if user := attribs.GetUser(); user != nil { - ev.User.Username = user.GetName() - ev.User.Extra = map[string]authnv1.ExtraValue{} - for k, v := range user.GetExtra() { - ev.User.Extra[k] = authnv1.ExtraValue(v) + ac.visitEvent(func(ev *auditinternal.Event) { + ev.RequestReceivedTimestamp = metav1.NewMicroTime(requestReceivedTimestamp) + ev.Verb = attribs.GetVerb() + ev.RequestURI = req.URL.RequestURI() + ev.UserAgent = maybeTruncateUserAgent(req) + + ips := utilnet.SourceIPs(req) + ev.SourceIPs = make([]string, len(ips)) + for i := range ips { + ev.SourceIPs[i] = ips[i].String() } - ev.User.Groups = user.GetGroups() - ev.User.UID = user.GetUID() - } - if attribs.IsResourceRequest() { - ev.ObjectRef = &auditinternal.ObjectReference{ - Namespace: attribs.GetNamespace(), - Name: attribs.GetName(), - Resource: attribs.GetResource(), - Subresource: attribs.GetSubresource(), - APIGroup: attribs.GetAPIGroup(), - APIVersion: attribs.GetAPIVersion(), + if user := attribs.GetUser(); user != nil { + ev.User.Username = user.GetName() + ev.User.Extra = map[string]authnv1.ExtraValue{} + for k, v := range user.GetExtra() { + ev.User.Extra[k] = authnv1.ExtraValue(v) + } + ev.User.Groups = user.GetGroups() + ev.User.UID = user.GetUID() } - } + + if attribs.IsResourceRequest() { + ev.ObjectRef = &auditinternal.ObjectReference{ + Namespace: attribs.GetNamespace(), + Name: attribs.GetName(), + Resource: attribs.GetResource(), + Subresource: attribs.GetSubresource(), + APIGroup: attribs.GetAPIGroup(), + APIVersion: attribs.GetAPIVersion(), + } + } + }) } // LogImpersonatedUser fills in the impersonated user attributes into an audit event. 
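The removal of `AuditEventFrom` above is the API break that ripples through the rest of this diff: callers can no longer take a `*auditinternal.Event` out of the context and must go through `AuditContextFrom` plus the new accessor methods, which copy under the lock. A sketch of the caller-side migration, assuming only the vendored packages shown in this diff:

```go
package auditmigration

import (
	"context"

	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"
)

// Old pattern (no longer compiles after this bump):
//
//	if ev := audit.AuditEventFrom(ctx); ev != nil {
//		level, annotations := ev.Level, ev.Annotations
//	}
//
// New pattern: accessors return copies taken under the AuditContext lock.
func levelAndAnnotations(ctx context.Context) (auditinternal.Level, map[string]string) {
	ac := audit.AuditContextFrom(ctx)
	if ac == nil {
		return auditinternal.LevelNone, nil
	}
	return ac.GetEventLevel(), ac.GetEventAnnotations()
}
```

The `cached_token_authenticator.go` hunk further down is exactly this migration: the direct read of `ac.Event.Annotations` becomes `ac.GetEventAnnotations()`, which returns a `maps.Clone` of the guarded map.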
-func LogImpersonatedUser(ae *auditinternal.Event, user user.Info) { - if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { +func LogImpersonatedUser(ctx context.Context, user user.Info) { + ac := AuditContextFrom(ctx) + if !ac.Enabled() { return } - ae.ImpersonatedUser = &authnv1.UserInfo{ - Username: user.GetName(), - } - ae.ImpersonatedUser.Groups = user.GetGroups() - ae.ImpersonatedUser.UID = user.GetUID() - ae.ImpersonatedUser.Extra = map[string]authnv1.ExtraValue{} - for k, v := range user.GetExtra() { - ae.ImpersonatedUser.Extra[k] = authnv1.ExtraValue(v) - } + ac.LogImpersonatedUser(user) } // LogRequestObject fills in the request object into an audit event. The passed runtime.Object // will be converted to the given gv. func LogRequestObject(ctx context.Context, obj runtime.Object, objGV schema.GroupVersion, gvr schema.GroupVersionResource, subresource string, s runtime.NegotiatedSerializer) { - ae := AuditEventFrom(ctx) - if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { + ac := AuditContextFrom(ctx) + if !ac.Enabled() { return } - - // complete ObjectRef - if ae.ObjectRef == nil { - ae.ObjectRef = &auditinternal.ObjectReference{} - } - - // meta.Accessor is more general than ObjectMetaAccessor, but if it fails, we can just skip setting these bits - if meta, err := meta.Accessor(obj); err == nil { - if len(ae.ObjectRef.Namespace) == 0 { - ae.ObjectRef.Namespace = meta.GetNamespace() - } - if len(ae.ObjectRef.Name) == 0 { - ae.ObjectRef.Name = meta.GetName() - } - if len(ae.ObjectRef.UID) == 0 { - ae.ObjectRef.UID = meta.GetUID() - } - if len(ae.ObjectRef.ResourceVersion) == 0 { - ae.ObjectRef.ResourceVersion = meta.GetResourceVersion() - } - } - if len(ae.ObjectRef.APIVersion) == 0 { - ae.ObjectRef.APIGroup = gvr.Group - ae.ObjectRef.APIVersion = gvr.Version - } - if len(ae.ObjectRef.Resource) == 0 { - ae.ObjectRef.Resource = gvr.Resource - } - if len(ae.ObjectRef.Subresource) == 0 { - ae.ObjectRef.Subresource = subresource - } - - if ae.Level.Less(auditinternal.LevelRequest) { + if ac.GetEventLevel().Less(auditinternal.LevelMetadata) { return } - if shouldOmitManagedFields(ctx) { + // meta.Accessor is more general than ObjectMetaAccessor, but if it fails, we can just skip setting these bits + objMeta, _ := meta.Accessor(obj) + if shouldOmitManagedFields(ac) { copy, ok, err := copyWithoutManagedFields(obj) if err != nil { - klog.ErrorS(err, "Error while dropping managed fields from the request", "auditID", ae.AuditID) + klog.ErrorS(err, "Error while dropping managed fields from the request", "auditID", ac.AuditID()) } if ok { obj = copy @@ -151,54 +114,75 @@ func LogRequestObject(ctx context.Context, obj runtime.Object, objGV schema.Grou } // TODO(audit): hook into the serializer to avoid double conversion - var err error - ae.RequestObject, err = encodeObject(obj, objGV, s) + requestObject, err := encodeObject(obj, objGV, s) if err != nil { // TODO(audit): add error slice to audit event struct - klog.ErrorS(err, "Encoding failed of request object", "auditID", ae.AuditID, "gvr", gvr.String(), "obj", obj) + klog.ErrorS(err, "Encoding failed of request object", "auditID", ac.AuditID(), "gvr", gvr.String(), "obj", obj) return } + + ac.visitEvent(func(ae *auditinternal.Event) { + if ae.ObjectRef == nil { + ae.ObjectRef = &auditinternal.ObjectReference{} + } + + if objMeta != nil { + if len(ae.ObjectRef.Namespace) == 0 { + ae.ObjectRef.Namespace = objMeta.GetNamespace() + } + if len(ae.ObjectRef.Name) == 0 { + ae.ObjectRef.Name = objMeta.GetName() + } + if 
len(ae.ObjectRef.UID) == 0 { + ae.ObjectRef.UID = objMeta.GetUID() + } + if len(ae.ObjectRef.ResourceVersion) == 0 { + ae.ObjectRef.ResourceVersion = objMeta.GetResourceVersion() + } + } + if len(ae.ObjectRef.APIVersion) == 0 { + ae.ObjectRef.APIGroup = gvr.Group + ae.ObjectRef.APIVersion = gvr.Version + } + if len(ae.ObjectRef.Resource) == 0 { + ae.ObjectRef.Resource = gvr.Resource + } + if len(ae.ObjectRef.Subresource) == 0 { + ae.ObjectRef.Subresource = subresource + } + + if ae.Level.Less(auditinternal.LevelRequest) { + return + } + ae.RequestObject = requestObject + }) } // LogRequestPatch fills in the given patch as the request object into an audit event. func LogRequestPatch(ctx context.Context, patch []byte) { - ae := AuditEventFrom(ctx) - if ae == nil || ae.Level.Less(auditinternal.LevelRequest) { + ac := AuditContextFrom(ctx) + if ac.GetEventLevel().Less(auditinternal.LevelRequest) { return } - - ae.RequestObject = &runtime.Unknown{ - Raw: patch, - ContentType: runtime.ContentTypeJSON, - } + ac.LogRequestPatch(patch) } // LogResponseObject fills in the response object into an audit event. The passed runtime.Object // will be converted to the given gv. func LogResponseObject(ctx context.Context, obj runtime.Object, gv schema.GroupVersion, s runtime.NegotiatedSerializer) { - ae := AuditEventFrom(ctx) - if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { + ac := AuditContextFrom(WithAuditContext(ctx)) + status, _ := obj.(*metav1.Status) + if ac.GetEventLevel().Less(auditinternal.LevelMetadata) { return - } - if status, ok := obj.(*metav1.Status); ok { - // selectively copy the bounded fields. - ae.ResponseStatus = &metav1.Status{ - Status: status.Status, - Message: status.Message, - Reason: status.Reason, - Details: status.Details, - Code: status.Code, - } - } - - if ae.Level.Less(auditinternal.LevelRequestResponse) { + } else if ac.GetEventLevel().Less(auditinternal.LevelRequestResponse) { + ac.LogResponseObject(status, nil) return } - if shouldOmitManagedFields(ctx) { + if shouldOmitManagedFields(ac) { copy, ok, err := copyWithoutManagedFields(obj) if err != nil { - klog.ErrorS(err, "Error while dropping managed fields from the response", "auditID", ae.AuditID) + klog.ErrorS(err, "Error while dropping managed fields from the response", "auditID", ac.AuditID()) } if ok { obj = copy @@ -207,10 +191,11 @@ func LogResponseObject(ctx context.Context, obj runtime.Object, gv schema.GroupV // TODO(audit): hook into the serializer to avoid double conversion var err error - ae.ResponseObject, err = encodeObject(obj, gv, s) + responseObject, err := encodeObject(obj, gv, s) if err != nil { - klog.ErrorS(err, "Encoding failed of response object", "auditID", ae.AuditID, "obj", obj) + klog.ErrorS(err, "Encoding failed of response object", "auditID", ac.AuditID(), "obj", obj) } + ac.LogResponseObject(status, responseObject) } func encodeObject(obj runtime.Object, gv schema.GroupVersion, serializer runtime.NegotiatedSerializer) (*runtime.Unknown, error) { @@ -301,9 +286,9 @@ func removeManagedFields(obj runtime.Object) error { return nil } -func shouldOmitManagedFields(ctx context.Context) bool { - if auditContext := AuditContextFrom(ctx); auditContext != nil { - return auditContext.RequestAuditConfig.OmitManagedFields +func shouldOmitManagedFields(ac *AuditContext) bool { + if ac != nil && ac.initialized.Load() && ac.requestAuditConfig.OmitManagedFields { + return true } // If we can't decide, return false to maintain current behavior which is diff --git 
a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go index 18167dddc2bf..9d1556e63365 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go @@ -33,7 +33,6 @@ import ( "golang.org/x/sync/singleflight" apierrors "k8s.io/apimachinery/pkg/api/errors" - auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/warning" @@ -199,12 +198,9 @@ func (a *cachedTokenAuthenticator) doAuthenticateToken(ctx context.Context, toke ctx = audit.WithAuditContext(ctx) ac := audit.AuditContextFrom(ctx) - // since this is shared work between multiple requests, we have no way of knowing if any - // particular request supports audit annotations. thus we always attempt to record them. - ac.Event.Level = auditinternal.LevelMetadata record.resp, record.ok, record.err = a.authenticator.AuthenticateToken(ctx, token) - record.annotations = ac.Event.Annotations + record.annotations = ac.GetEventAnnotations() record.warnings = recorder.extractWarnings() if !a.cacheErrs && record.err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go b/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go index 19392babeb2c..909284166abe 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go +++ b/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go @@ -55,8 +55,15 @@ func SchemaDeclType(s Schema, isResourceRoot bool) *apiservercel.DeclType { // `type(intOrStringField) == int ? intOrStringField < 5 : double(intOrStringField.replace('%', '')) < 0.5 // dyn := apiservercel.NewSimpleTypeWithMinSize("dyn", cel.DynType, nil, 1) // smallest value for a serialized x-kubernetes-int-or-string is 0 - // handle x-kubernetes-int-or-string by returning the max length/min serialized size of the largest possible string - dyn.MaxElements = maxRequestSizeBytes - 2 + + // If the schema has a maxlength constraint, bound the max elements based on the max length. + // Otherwise, fallback to the max request size. 
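The CEL change here and in the hunks that follow centralizes one piece of arithmetic: OpenAPI `maxLength` is specified in runes/code points, while CEL cost estimation budgets in bytes, and a UTF-8 code point occupies at most 4 bytes. A worked standalone version of that estimate (the vendored helper takes a `Schema`; this sketch inlines the integer math):

```go
package main

import "fmt"

func zeroIfNegative(v int64) int64 {
	if v < 0 {
		return 0
	}
	return v
}

// estimateMaxElementsFromMaxLength mirrors the new helper: a rune budget is
// converted to a worst-case byte budget by multiplying by 4.
func estimateMaxElementsFromMaxLength(maxLength int64) int64 {
	return zeroIfNegative(maxLength) * 4
}

func main() {
	fmt.Println(estimateMaxElementsFromMaxLength(250)) // 1000 bytes for maxLength: 250
	fmt.Println(estimateMaxElementsFromMaxLength(-1))  // 0: negative lengths are clamped
}
```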
+ if s.MaxLength() != nil { + dyn.MaxElements = estimateMaxElementsFromMaxLength(s) + } else { + dyn.MaxElements = estimateMaxStringLengthPerRequest(s) + } + return dyn } @@ -159,11 +166,7 @@ func SchemaDeclType(s Schema, isResourceRoot bool) *apiservercel.DeclType { strWithMaxLength := apiservercel.NewSimpleTypeWithMinSize("string", cel.StringType, types.String(""), apiservercel.MinStringSize) if s.MaxLength() != nil { - // multiply the user-provided max length by 4 in the case of an otherwise-untyped string - // we do this because the OpenAPIv3 spec indicates that maxLength is specified in runes/code points, - // but we need to reason about length for things like request size, so we use bytes in this code (and an individual - // unicode code point can be up to 4 bytes long) - strWithMaxLength.MaxElements = zeroIfNegative(*s.MaxLength()) * 4 + strWithMaxLength.MaxElements = estimateMaxElementsFromMaxLength(s) } else { if len(s.Enum()) > 0 { strWithMaxLength.MaxElements = estimateMaxStringEnumLength(s) @@ -228,6 +231,7 @@ func WithTypeAndObjectMeta(s *spec.Schema) *spec.Schema { // must only be called on schemas of type "string" or x-kubernetes-int-or-string: true func estimateMaxStringLengthPerRequest(s Schema) int64 { if s.IsXIntOrString() { + // handle x-kubernetes-int-or-string by returning the max length/min serialized size of the largest possible string return maxRequestSizeBytes - 2 } switch s.Format() { @@ -272,3 +276,13 @@ func estimateMaxAdditionalPropertiesFromMinSize(minSize int64) int64 { // subtract 2 to account for { and } return (maxRequestSizeBytes - 2) / keyValuePairSize } + +// estimateMaxElementsFromMaxLength estimates the maximum number of elements for a string schema +// that is bound with a maxLength constraint. +func estimateMaxElementsFromMaxLength(s Schema) int64 { + // multiply the user-provided max length by 4 in the case of an otherwise-untyped string + // we do this because the OpenAPIv3 spec indicates that maxLength is specified in runes/code points, + // but we need to reason about length for things like request size, so we use bytes in this code (and an individual + // unicode code point can be up to 4 bytes long) + return zeroIfNegative(*s.MaxLength()) * 4 +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go index 6f850f728bfd..d25bf35ae3af 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go @@ -44,7 +44,7 @@ func WithAudit(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEva return handler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - ac, err := evaluatePolicyAndCreateAuditEvent(req, policy) + ac, err := evaluatePolicyAndCreateAuditEvent(req, policy, sink) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) responsewriters.InternalError(w, req, errors.New("failed to create audit event")) @@ -55,41 +55,37 @@ func WithAudit(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEva handler.ServeHTTP(w, req) return } - ev := &ac.Event ctx := req.Context() - omitStages := ac.RequestAuditConfig.OmitStages - ev.Stage = auditinternal.StageRequestReceived - if processed := processAuditEvent(ctx, sink, ev, omitStages); !processed { + if processed := ac.ProcessEventStage(ctx, auditinternal.StageRequestReceived); !processed { audit.ApiserverAuditDroppedCounter.WithContext(ctx).Inc() responsewriters.InternalError(w, req, 
errors.New("failed to store audit event")) return } // intercept the status code - var longRunningSink audit.Sink + isLongRunning := false if longRunningCheck != nil { ri, _ := request.RequestInfoFrom(ctx) if longRunningCheck(req, ri) { - longRunningSink = sink + isLongRunning = true } } - respWriter := decorateResponseWriter(ctx, w, ev, longRunningSink, omitStages) + respWriter := decorateResponseWriter(ctx, w, isLongRunning) // send audit event when we leave this func, either via a panic or cleanly. In the case of long // running requests, this will be the second audit event. defer func() { if r := recover(); r != nil { defer panic(r) - ev.Stage = auditinternal.StagePanic - ev.ResponseStatus = &metav1.Status{ + ac.SetEventResponseStatus(&metav1.Status{ Code: http.StatusInternalServerError, Status: metav1.StatusFailure, Reason: metav1.StatusReasonInternalError, Message: fmt.Sprintf("APIServer panic'd: %v", r), - } - processAuditEvent(ctx, sink, ev, omitStages) + }) + ac.ProcessEventStage(ctx, auditinternal.StagePanic) return } @@ -100,27 +96,25 @@ func WithAudit(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEva Status: metav1.StatusSuccess, Message: "Connection closed early", } - if ev.ResponseStatus == nil && longRunningSink != nil { - ev.ResponseStatus = fakedSuccessStatus - ev.Stage = auditinternal.StageResponseStarted - processAuditEvent(ctx, longRunningSink, ev, omitStages) - } - - ev.Stage = auditinternal.StageResponseComplete - if ev.ResponseStatus == nil { - ev.ResponseStatus = fakedSuccessStatus + if ac.GetEventResponseStatus() == nil { + ac.SetEventResponseStatus(fakedSuccessStatus) + if isLongRunning { + // A nil ResponseStatus means the writer never processed the ResponseStarted stage, so do that now. + ac.ProcessEventStage(ctx, auditinternal.StageResponseStarted) + } } - processAuditEvent(ctx, sink, ev, omitStages) + writeLatencyToAnnotation(ctx) + ac.ProcessEventStage(ctx, auditinternal.StageResponseComplete) }() handler.ServeHTTP(respWriter, req) }) } // evaluatePolicyAndCreateAuditEvent is responsible for evaluating the audit -// policy configuration applicable to the request and create a new audit -// event that will be written to the API audit log. +// policy configuration applicable to the request and initializing the audit +// context with the audit config for the request, the sink to write to, and the request metadata. // - error if anything bad happened -func evaluatePolicyAndCreateAuditEvent(req *http.Request, policy audit.PolicyRuleEvaluator) (*audit.AuditContext, error) { +func evaluatePolicyAndCreateAuditEvent(req *http.Request, policy audit.PolicyRuleEvaluator, sink audit.Sink) (*audit.AuditContext, error) { ctx := req.Context() ac := audit.AuditContextFrom(ctx) if ac == nil { @@ -135,7 +129,10 @@ func evaluatePolicyAndCreateAuditEvent(req *http.Request, policy audit.PolicyRul rac := policy.EvaluatePolicyRule(attribs) audit.ObservePolicyLevel(ctx, rac.Level) - ac.RequestAuditConfig = rac + err = ac.Init(rac, sink) + if err != nil { + return nil, fmt.Errorf("failed to initialize audit context: %w", err) + } if rac.Level == auditinternal.LevelNone { // Don't audit. 
return ac, nil @@ -145,7 +142,7 @@ func evaluatePolicyAndCreateAuditEvent(req *http.Request, policy audit.PolicyRul if !ok { requestReceivedTimestamp = time.Now() } - audit.LogRequestMetadata(ctx, req, requestReceivedTimestamp, rac.Level, attribs) + audit.LogRequestMetadata(ctx, req, requestReceivedTimestamp, attribs) return ac, nil } @@ -153,13 +150,14 @@ func evaluatePolicyAndCreateAuditEvent(req *http.Request, policy audit.PolicyRul // writeLatencyToAnnotation writes the latency incurred in different // layers of the apiserver to the annotations of the audit object. // it should be invoked after the event's StageTimestamp has been set appropriately. -func writeLatencyToAnnotation(ctx context.Context, ev *auditinternal.Event) { +func writeLatencyToAnnotation(ctx context.Context) { + ac := audit.AuditContextFrom(ctx) // we will track latency in annotation only when the total latency // of the given request exceeds 500ms, this is in keeping with the // traces in rest/handlers for create, delete, update, // get, list, and deletecollection. const threshold = 500 * time.Millisecond - latency := ev.StageTimestamp.Time.Sub(ev.RequestReceivedTimestamp.Time) + latency := ac.GetEventStageTimestamp().Sub(ac.GetEventRequestReceivedTimestamp().Time) if latency <= threshold { return } @@ -177,34 +175,12 @@ func writeLatencyToAnnotation(ctx context.Context, ev *auditinternal.Event) { audit.AddAuditAnnotationsMap(ctx, layerLatencies) } -func processAuditEvent(ctx context.Context, sink audit.Sink, ev *auditinternal.Event, omitStages []auditinternal.Stage) bool { - for _, stage := range omitStages { - if ev.Stage == stage { - return true - } - } - - switch { - case ev.Stage == auditinternal.StageRequestReceived: - ev.StageTimestamp = metav1.NewMicroTime(ev.RequestReceivedTimestamp.Time) - case ev.Stage == auditinternal.StageResponseComplete: - ev.StageTimestamp = metav1.NewMicroTime(time.Now()) - writeLatencyToAnnotation(ctx, ev) - default: - ev.StageTimestamp = metav1.NewMicroTime(time.Now()) - } - - audit.ObserveEvent(ctx) - return sink.ProcessEvents(ev) -} - -func decorateResponseWriter(ctx context.Context, responseWriter http.ResponseWriter, ev *auditinternal.Event, sink audit.Sink, omitStages []auditinternal.Stage) http.ResponseWriter { +func decorateResponseWriter(ctx context.Context, responseWriter http.ResponseWriter, processResponseStartedStage bool) http.ResponseWriter { delegate := &auditResponseWriter{ ctx: ctx, ResponseWriter: responseWriter, - event: ev, - sink: sink, - omitStages: omitStages, + + processResponseStartedStage: processResponseStartedStage, } return responsewriter.WrapForHTTP1Or2(delegate) @@ -217,11 +193,10 @@ var _ responsewriter.UserProvidedDecorator = &auditResponseWriter{} // create immediately an event (for long running requests).
type auditResponseWriter struct { http.ResponseWriter - ctx context.Context - event *auditinternal.Event - once sync.Once - sink audit.Sink - omitStages []auditinternal.Stage + ctx context.Context + once sync.Once + + processResponseStartedStage bool } func (a *auditResponseWriter) Unwrap() http.ResponseWriter { @@ -230,14 +205,10 @@ func (a *auditResponseWriter) Unwrap() http.ResponseWriter { func (a *auditResponseWriter) processCode(code int) { a.once.Do(func() { - if a.event.ResponseStatus == nil { - a.event.ResponseStatus = &metav1.Status{} - } - a.event.ResponseStatus.Code = int32(code) - a.event.Stage = auditinternal.StageResponseStarted - - if a.sink != nil { - processAuditEvent(a.ctx, a.sink, a.event, a.omitStages) + ac := audit.AuditContextFrom(a.ctx) + ac.SetEventResponseStatusCode(int32(code)) + if a.processResponseStartedStage { + ac.ProcessEventStage(a.ctx, auditinternal.StageResponseStarted) } }) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go index 4bd6bbc13966..d9cdcd2d62d1 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go @@ -24,7 +24,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" ) @@ -36,7 +35,7 @@ func WithFailedAuthenticationAudit(failedHandler http.Handler, sink audit.Sink, return failedHandler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - ac, err := evaluatePolicyAndCreateAuditEvent(req, policy) + ac, err := evaluatePolicyAndCreateAuditEvent(req, policy, sink) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) responsewriters.InternalError(w, req, errors.New("failed to create audit event")) @@ -47,13 +46,11 @@ func WithFailedAuthenticationAudit(failedHandler http.Handler, sink audit.Sink, failedHandler.ServeHTTP(w, req) return } - ev := &ac.Event - ev.ResponseStatus = &metav1.Status{} - ev.ResponseStatus.Message = getAuthMethods(req) - ev.Stage = auditinternal.StageResponseStarted - - rw := decorateResponseWriter(req.Context(), w, ev, sink, ac.RequestAuditConfig.OmitStages) + ac.SetEventResponseStatus(&metav1.Status{ + Message: getAuthMethods(req), + }) + rw := decorateResponseWriter(req.Context(), w, true) failedHandler.ServeHTTP(rw, req) }) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index a6d293a15908..aa47a7536d01 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -166,8 +166,7 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. 
oldUser, _ := request.UserFrom(ctx) httplog.LogOf(req, w).Addf("%v is impersonating %v", userString(oldUser), userString(newUser)) - ae := audit.AuditEventFrom(ctx) - audit.LogImpersonatedUser(ae, newUser) + audit.LogImpersonatedUser(audit.WithAuditContext(ctx), newUser) // clear all the impersonation headers from the request req.Header.Del(authenticationv1.ImpersonateUserHeader) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go index 7497bc38a424..066d670a2ad1 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go @@ -108,7 +108,7 @@ func withFailedRequestAudit(failedHandler http.Handler, statusErr *apierrors.Sta return failedHandler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - ac, err := evaluatePolicyAndCreateAuditEvent(req, policy) + ac, err := evaluatePolicyAndCreateAuditEvent(req, policy, sink) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err)) responsewriters.InternalError(w, req, errors.New("failed to create audit event")) @@ -119,15 +119,15 @@ func withFailedRequestAudit(failedHandler http.Handler, statusErr *apierrors.Sta failedHandler.ServeHTTP(w, req) return } - ev := &ac.Event - ev.ResponseStatus = &metav1.Status{} - ev.Stage = auditinternal.StageResponseStarted + respStatus := &metav1.Status{} if statusErr != nil { - ev.ResponseStatus.Message = statusErr.Error() + respStatus.Message = statusErr.Error() } + ac.SetEventResponseStatus(respStatus) + ac.SetEventStage(auditinternal.StageResponseStarted) - rw := decorateResponseWriter(req.Context(), w, ev, sink, ac.RequestAuditConfig.OmitStages) + rw := decorateResponseWriter(req.Context(), w, true) failedHandler.ServeHTTP(rw, req) }) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/delegator.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/delegator.go index ac17fb1c8878..10d2ce4c8117 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/delegator.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/delegator.go @@ -206,6 +206,7 @@ func (c *CacheDelegator) GetList(ctx context.Context, key string, opts storage.L return c.storage.GetList(ctx, key, opts, listObj) } } + fallbackOpts := opts if result.ConsistentRead { listRV, err = c.storage.GetCurrentResourceVersion(ctx) if err != nil { @@ -213,20 +214,28 @@ func (c *CacheDelegator) GetList(ctx context.Context, key string, opts storage.L } // Setting resource version for consistent read in cache based on current ResourceVersion in etcd. opts.ResourceVersion = strconv.FormatInt(int64(listRV), 10) + // If continue is not set, we need to set the resource version match to ResourceVersionMatchNotOlderThan to serve latest from cache + if opts.Predicate.Continue == "" { + opts.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + } } err = c.cacher.GetList(ctx, key, opts, listObj) success := "true" fallback := "false" if err != nil { - if errors.IsResourceExpired(err) { - return c.storage.GetList(ctx, key, opts, listObj) + // ResourceExpired error occurs when attempting to list from cache with a specific resourceVersion + // that is no longer available in the cache. With ListFromCacheSnapshot feature (1.34+), we can + // serve exact resourceVersion requests from cache if available, falling back to storage only when + // the requested version is expired. 
+ if errors.IsResourceExpired(err) && utilfeature.DefaultFeatureGate.Enabled(features.ListFromCacheSnapshot) { + return c.storage.GetList(ctx, key, fallbackOpts, listObj) } if result.ConsistentRead { + // IsTooLargeResourceVersion occurs when the requested RV is higher than cache's current RV + // and cache hasn't caught up within the timeout period. Fall back to etcd. if storage.IsTooLargeResourceVersion(err) { fallback = "true" - // Reset resourceVersion during fallback from consistent read. - opts.ResourceVersion = "" - err = c.storage.GetList(ctx, key, opts, listObj) + err = c.storage.GetList(ctx, key, fallbackOpts, listObj) } if err != nil { success = "false" diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go index b03640ae8df1..8552e91eb533 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go @@ -83,6 +83,7 @@ func NewGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFact clientConfig := rest.CopyConfig(config) codec := codecFactory.LegacyCodec(groupVersions...) + clientConfig.ContentType = runtime.ContentTypeJSON clientConfig.ContentConfig.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) clientConfig.Wrap(x509metrics.NewDeprecatedCertificateRoundTripperWrapperConstructor( diff --git a/vendor/k8s.io/component-helpers/resource/helpers.go b/vendor/k8s.io/component-helpers/resource/helpers.go index 780db5424516..7ff5bef111db 100644 --- a/vendor/k8s.io/component-helpers/resource/helpers.go +++ b/vendor/k8s.io/component-helpers/resource/helpers.go @@ -404,7 +404,12 @@ func maxResourceList(list, newList v1.ResourceList) { // max returns the result of max(a, b...) 
for each named resource and is only used if we can't // accumulate into an existing resource list func max(a v1.ResourceList, b ...v1.ResourceList) v1.ResourceList { - result := a.DeepCopy() + var result v1.ResourceList + if a != nil { + result = a.DeepCopy() + } else { + result = v1.ResourceList{} + } for _, other := range b { maxResourceList(result, other) } diff --git a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go index 0a564ac269a9..c972e6606825 100644 --- a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go +++ b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go @@ -2355,6 +2355,10 @@ var Annotations = map[string]string{ "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage RWOP volume when starting a second pod with different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on a privileged and unprivileged Pod with given SELinux with MountOption policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on an unprivileged and privileged Pod with given SELinux with MountOption policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with MountOption policy and a different context on RWOP volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -2369,13 +2373,19 @@ var Annotations = map[string]string{ "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different policy on RWO volume and SELinuxMount enabled (nil + Recursive) [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on a 
privileged and unprivileged Pod with given SELinux context and recursive policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with Recursive policy and a different context on RWX volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", - "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with a different policy RWX volume (MountOption + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", - "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with a different policy RWX volume (nil + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with the same policy RWX volume (MountOption + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", - "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with the same policy RWX volume (nil + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two privileged Pods with mount policy RWO volume 
[FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two privileged Pods with recursive policy RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]": " [Suite:openshift/conformance/serial] [Suite:k8s]", "[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] warning is bumped on two Pods with a different context on RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly]": " [Suite:openshift/conformance/serial] [Suite:k8s]", diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/storage/performantsecuritypolicy/admission.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/storage/performantsecuritypolicy/admission.go index 039bb03bbb3e..e3bda3dbe06c 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/storage/performantsecuritypolicy/admission.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/storage/performantsecuritypolicy/admission.go @@ -30,6 +30,9 @@ var ( _ = initializer.WantsExternalKubeInformerFactory(&performantSecurityPolicy{}) _ = admission.MutationInterface(&performantSecurityPolicy{}) _ = initializer.WantsFeatures(&performantSecurityPolicy{}) + + fsGroupPolicyPodAuditLabel = fmt.Sprintf("%s-pod", fsGroupChangePolicyLabel) + selinuxPolicyPodAuditLabel = fmt.Sprintf("%s-pod", selinuxChangePolicyLabel) ) func Register(plugins *admission.Plugins) { @@ -98,7 +101,7 @@ func (c *performantSecurityPolicy) Admit(ctx context.Context, attributes admissi currentFSGroupChangePolicy = getDefaultFSGroupChangePolicy(ctx, ns) if currentFSGroupChangePolicy != nil { klog.V(4).Infof("Setting default FSGroupChangePolicy %s for pod %s", *currentFSGroupChangePolicy, podNameKey) - audit.AddAuditAnnotations(ctx, "fsGroupChangePolicy", string(*currentFSGroupChangePolicy), "pod", podNameKey) + audit.AddAuditAnnotations(ctx, fsGroupChangePolicyLabel, string(*currentFSGroupChangePolicy), fsGroupPolicyPodAuditLabel, podNameKey) if pod.Spec.SecurityContext != nil { pod.Spec.SecurityContext.FSGroupChangePolicy = currentFSGroupChangePolicy } else { @@ -114,7 +117,7 @@ func (c *performantSecurityPolicy) Admit(ctx context.Context, attributes admissi currentSELinuxChangePolicy = getDefaultSELinuxChangePolicy(ctx, ns) if currentSELinuxChangePolicy != nil { klog.V(4).Infof("Setting default SELinuxChangePolicy %s for pod %s", *currentSELinuxChangePolicy, podNameKey) - audit.AddAuditAnnotations(ctx, "selinuxChangePolicy", string(*currentSELinuxChangePolicy), "pod", podNameKey) + audit.AddAuditAnnotations(ctx, selinuxChangePolicyLabel, string(*currentSELinuxChangePolicy), selinuxPolicyPodAuditLabel, podNameKey) if pod.Spec.SecurityContext != nil { pod.Spec.SecurityContext.SELinuxChangePolicy = currentSELinuxChangePolicy } else { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/allocation_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/allocation_manager.go index 5287ba169b11..2eb701c0b9ac 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/allocation_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/allocation_manager.go @@ -109,14 +109,20 @@ func (m *manager) GetContainerResourceAllocation(podUID types.UID, containerName // UpdatePodFromAllocation overwrites the pod spec with the allocation. // This function does a deep copy only if updates are needed. func (m *manager) UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool) { - // TODO(tallclair): This clones the whole cache, but we only need 1 pod. - allocs := m.allocated.GetPodResourceInfoMap() - return updatePodFromAllocation(pod, allocs) + if pod == nil { + return pod, false + } + + allocated, ok := m.allocated.GetPodResourceInfo(pod.UID) + if !ok { + return pod, false + } + + return updatePodFromAllocation(pod, allocated) } -func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceInfoMap) (*v1.Pod, bool) { - allocated, found := allocs[pod.UID] - if !found { +func updatePodFromAllocation(pod *v1.Pod, allocated state.PodResourceInfo) (*v1.Pod, bool) { + if pod == nil { return pod, false } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state.go b/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state.go index 96a2421f08ff..8022e10413c3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state.go @@ -50,6 +50,7 @@ func (pr PodResourceInfoMap) Clone() PodResourceInfoMap { type Reader interface { GetContainerResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) GetPodResourceInfoMap() PodResourceInfoMap + GetPodResourceInfo(podUID types.UID) (PodResourceInfo, bool) } type writer interface { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state_checkpoint.go b/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state_checkpoint.go index f6c5ce78c435..f41415c0152d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state_checkpoint.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state_checkpoint.go @@ -112,13 +112,20 @@ func (sc *stateCheckpoint) GetContainerResources(podUID types.UID, containerName return sc.cache.GetContainerResources(podUID, containerName) } -// GetPodResourceInfoMap returns current pod resource information +// GetPodResourceInfoMap returns current pod resource information map func (sc *stateCheckpoint) GetPodResourceInfoMap() PodResourceInfoMap { sc.mux.RLock() defer sc.mux.RUnlock() return sc.cache.GetPodResourceInfoMap() } +// GetPodResourceInfo returns current pod resource information +func (sc *stateCheckpoint) GetPodResourceInfo(podUID types.UID) (PodResourceInfo, bool) { + sc.mux.RLock() + defer sc.mux.RUnlock() + return sc.cache.GetPodResourceInfo(podUID) +} + // SetContainerResources sets resource information for a pod's container func (sc *stateCheckpoint) SetContainerResources(podUID types.UID, containerName string, resources v1.ResourceRequirements) error { sc.mux.Lock() @@ -172,6 +179,10 @@ func (sc *stateCheckpoint) GetPodResourceInfoMap() PodResourceInfoMap { return nil } +func (sc *noopStateCheckpoint) GetPodResourceInfo(_ types.UID) (PodResourceInfo, bool) { + return PodResourceInfo{}, false +} + func (sc *noopStateCheckpoint) SetContainerResources(_ types.UID, _ string, _ v1.ResourceRequirements) error { return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state_mem.go b/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state_mem.go index
e7e44503c642..e4b5210524bb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state_mem.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/allocation/state/state_mem.go @@ -65,6 +65,14 @@ func (s *stateMemory) GetPodResourceInfoMap() PodResourceInfoMap { return s.podResources.Clone() } +func (s *stateMemory) GetPodResourceInfo(podUID types.UID) (PodResourceInfo, bool) { + s.RLock() + defer s.RUnlock() + + resourceInfo, ok := s.podResources[podUID] + return resourceInfo, ok +} + func (s *stateMemory) SetContainerResources(podUID types.UID, containerName string, resources v1.ResourceRequirements) error { s.Lock() defer s.Unlock() diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go index 5c9502cb28a6..f40d0fc6be6e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go @@ -536,6 +536,11 @@ func (p *Plugin) admitNode(nodeName string, a admission.Attributes) error { return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to modify taints", nodeName)) } + // Don't allow a node to update its own ownerReferences. + if !apiequality.Semantic.DeepEqual(node.OwnerReferences, oldNode.OwnerReferences) { + return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to modify ownerReferences", nodeName)) + } + // Don't allow a node to update labels outside the allowed set. // This would allow a node to add or modify its labels in a way that would let it steer privileged workloads to itself. modifiedLabels := getModifiedLabels(node.Labels, oldNode.Labels) diff --git a/vendor/k8s.io/kubernetes/test/e2e/network/service_cidrs.go b/vendor/k8s.io/kubernetes/test/e2e/network/service_cidrs.go index 6f3d415ec17b..f445e94ff79a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/network/service_cidrs.go +++ b/vendor/k8s.io/kubernetes/test/e2e/network/service_cidrs.go @@ -167,33 +167,6 @@ var _ = common.SIGDescribe("ServiceCIDR and IPAddress API", func() { framework.Failf("unexpected error getting default ServiceCIDR: %v", err) } - ginkgo.By("patching") - patchedServiceCIDR, err := f.ClientSet.NetworkingV1().ServiceCIDRs().Patch(ctx, defaultservicecidr.DefaultServiceCIDRName, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{}) - if err != nil { - framework.Failf("unexpected error patching IPAddress: %v", err) - } - if v, ok := patchedServiceCIDR.Annotations["patched"]; !ok || v != "true" { - framework.Failf("patched object should have the applied annotation") - } - - ginkgo.By("updating") - var cidrToUpdate, updatedCIDR *networkingv1.ServiceCIDR - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - cidrToUpdate, err = f.ClientSet.NetworkingV1().ServiceCIDRs().Get(ctx, defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{}) - if err != nil { - return err - } - cidrToUpdate.Annotations["updated"] = "true" - updatedCIDR, err = f.ClientSet.NetworkingV1().ServiceCIDRs().Update(ctx, cidrToUpdate, metav1.UpdateOptions{}) - return err - }) - if err != nil { - framework.Failf("unexpected error updating IPAddress: %v", err) - } - if v, ok := updatedCIDR.Annotations["updated"]; !ok || v != "true" { - framework.Failf("updated object should have the applied annotation") - } - ginkgo.By("listing") list, err := f.ClientSet.NetworkingV1().ServiceCIDRs().List(ctx, metav1.ListOptions{}) if err != nil { diff --git 
a/vendor/k8s.io/kubernetes/test/e2e/storage/csimock/base.go b/vendor/k8s.io/kubernetes/test/e2e/storage/csimock/base.go index d58a7b33c57c..a75761450b62 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/csimock/base.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/csimock/base.go @@ -463,7 +463,7 @@ func (m *mockDriverSetup) createPodWithFSGroup(ctx context.Context, fsGroup *int return class, claim, pod } -func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { +func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy, privileged bool) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) { ginkgo.By("Creating pod with SELinux context") f := m.f nodeSelection := m.config.ClientNodeSelection @@ -480,7 +480,7 @@ func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes ReclaimPolicy: m.tp.reclaimPolicy, } class, claim := createClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes) - pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts, policy) + pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts, policy, privileged) framework.ExpectNoError(err, "Failed to create pause pod with SELinux context %s: %v", seLinuxOpts, err) if class != nil { @@ -802,7 +802,7 @@ func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.Vol return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) } -func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy) (*v1.Pod, error) { +func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy, privileged bool) (*v1.Pod, error) { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "pvc-volume-tester-", @@ -816,6 +816,9 @@ func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentV { Name: "volume-tester", Image: imageutils.GetE2EImage(imageutils.Pause), + SecurityContext: &v1.SecurityContext{ + Privileged: &privileged, + }, VolumeMounts: []v1.VolumeMount{ { Name: "my-volume", diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/csimock/csi_selinux_mount.go b/vendor/k8s.io/kubernetes/test/e2e/storage/csimock/csi_selinux_mount.go index c3ed53418e1d..3a7045cbaf90 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/csimock/csi_selinux_mount.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/csimock/csi_selinux_mount.go @@ -298,7 +298,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { // Act ginkgo.By("Starting the initial pod") accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode} - _, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts, t.firstPodChangePolicy) + _, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts, t.firstPodChangePolicy, false /* privileged */) err := 
e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "starting the initial pod") @@ -331,7 +331,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "getting the initial pod") nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName} - pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy) + pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy, false /* privileged */) framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts) m.pods = append(m.pods, pod2) @@ -453,8 +453,10 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC csiDriverSELinuxEnabled bool firstPodSELinuxOpts *v1.SELinuxOptions firstPodChangePolicy *v1.PodSELinuxChangePolicy + firstPodPrivileged bool secondPodSELinuxOpts *v1.SELinuxOptions secondPodChangePolicy *v1.PodSELinuxChangePolicy + secondPodPrivileged bool volumeMode v1.PersistentVolumeAccessMode waitForSecondPodStart bool secondPodFailureEvent string @@ -599,7 +601,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, }, { - name: "error is not bumped on two Pods with a different policy RWX volume (nil + MountOption)", + name: "error is not bumped on two Pods with the same policy RWX volume (nil + MountOption)", csiDriverSELinuxEnabled: true, firstPodSELinuxOpts: &seLinuxOpts1, firstPodChangePolicy: &mount, @@ -611,7 +613,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, }, { - name: "error is not bumped on two Pods with a different policy RWX volume (MountOption + MountOption)", + name: "error is not bumped on two Pods with the same policy RWX volume (MountOption + MountOption)", csiDriverSELinuxEnabled: true, firstPodSELinuxOpts: &seLinuxOpts1, firstPodChangePolicy: &mount, @@ -648,6 +650,75 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC expectControllerConflictProperty: "SELinuxLabel", testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, }, + { + name: "error is not bumped on two privileged Pods with mount policy RWO volume", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + firstPodPrivileged: true, + firstPodChangePolicy: &mount, + secondPodSELinuxOpts: nil, /* privileged Pods are
typically without SELinux context */ + secondPodPrivileged: true, + secondPodChangePolicy: &mount, + volumeMode: v1.ReadWriteOnce, + waitForSecondPodStart: true, + expectNodeIncreases: sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ), + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, + { + name: "error is not bumped on two privileged Pods with recursive policy RWO volume", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + firstPodPrivileged: true, + firstPodChangePolicy: &recursive, + secondPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + secondPodPrivileged: true, + secondPodChangePolicy: &recursive, + volumeMode: v1.ReadWriteOnce, + waitForSecondPodStart: true, + expectNodeIncreases: sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ), + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, + { + name: "error is not bumped on a privileged and unprivileged Pod with given SELinux context and recursive policy", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + firstPodPrivileged: true, + secondPodSELinuxOpts: &seLinuxOpts1, + secondPodChangePolicy: &recursive, + secondPodPrivileged: false, + volumeMode: v1.ReadWriteMany, + waitForSecondPodStart: true, + expectNodeIncreases: sets.New[string]( /* no metric is increased, admitted_total was already increased when the first pod started */ ), + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, + { + name: "error is bumped on a privileged and unprivileged Pod with given SELinux with MountOption policy", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + firstPodPrivileged: true, + secondPodSELinuxOpts: &seLinuxOpts1, + secondPodChangePolicy: &mount, + secondPodFailureEvent: "conflicting SELinux labels of volume", + volumeMode: v1.ReadWriteOncePod, + waitForSecondPodStart: false, + expectNodeIncreases: sets.New[string]("volume_manager_selinux_volume_context_mismatch_errors_total"), + expectControllerConflictProperty: "SELinuxLabel", + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, + { + name: "error is bumped on an unprivileged and privileged Pod with given SELinux with MountOption policy", + csiDriverSELinuxEnabled: true, + firstPodSELinuxOpts: &seLinuxOpts1, + firstPodChangePolicy: &mount, + secondPodSELinuxOpts: nil, /* privileged Pods are typically without SELinux context */ + secondPodPrivileged: true, + secondPodFailureEvent: "conflicting SELinux labels of volume", + volumeMode: v1.ReadWriteOncePod, + waitForSecondPodStart: false, + expectNodeIncreases: sets.New[string]("volume_manager_selinux_volume_context_mismatch_errors_total"), + expectControllerConflictProperty: "SELinuxLabel", + testTags: []interface{}{framework.WithFeatureGate(features.SELinuxMount)}, + }, } for _, t := range tests { t := t @@ -673,7 +744,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC ginkgo.By("Starting the first pod") accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode} - _, claim, pod := m.createPodWithSELinux(ctx, accessModes, []string{}, t.firstPodSELinuxOpts, t.firstPodChangePolicy) + _, claim, pod := m.createPodWithSELinux(ctx, accessModes, []string{}, t.firstPodSELinuxOpts, t.firstPodChangePolicy, t.firstPodPrivileged) err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "starting the initial pod") @@ -688,7 +759,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC ginkgo.By("Starting the second pod") // Skip scheduler, it would block scheduling the second pod with ReadWriteOncePod PV.
nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName} - pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy) + pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy, t.secondPodPrivileged) framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts) m.pods = append(m.pods, pod2) diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 7574ceaf6b24..6f0d49c47baf 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -223,7 +223,7 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.29.2"} configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"} configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.6"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.7"} configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.21-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go index 7cb7795beca7..e5d508055d32 100644 --- a/vendor/k8s.io/utils/net/multi_listen.go +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -21,6 +21,7 @@ import ( "fmt" "net" "sync" + "sync/atomic" ) // connErrPair pairs conn and error which is returned by accept on sub-listeners. @@ -38,6 +39,7 @@ type multiListener struct { connCh chan connErrPair // stopCh communicates from parent to child listeners. stopCh chan struct{} + closed atomic.Bool } // compile time check to ensure *multiListener implements net.Listener @@ -150,10 +152,8 @@ func (ml *multiListener) Accept() (net.Conn, error) { // the go-routines to exit. func (ml *multiListener) Close() error { // Make sure this can be called repeatedly without explosions. - select { - case <-ml.stopCh: + if !ml.closed.CompareAndSwap(false, true) { return fmt.Errorf("use of closed network connection") - default: } // Tell all sub-listeners to stop. 
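The multi_listen.go hunk above makes Close idempotent by replacing the select-on-stopCh guard with an atomic.Bool: the first caller flips the flag and proceeds with teardown, while every later or concurrent caller takes the error path without touching the channel again. Below is a minimal standalone sketch of the same pattern; the closer type and newCloser constructor are hypothetical names for illustration, not the vendored multiListener implementation.

package main

import (
	"fmt"
	"sync/atomic"
)

// closer demonstrates the CompareAndSwap close guard: the zero value of
// atomic.Bool is false, so exactly one Close call wins the swap and runs
// the teardown once.
type closer struct {
	closed atomic.Bool
	stopCh chan struct{}
}

func newCloser() *closer {
	return &closer{stopCh: make(chan struct{})}
}

func (c *closer) Close() error {
	// CompareAndSwap(false, true) succeeds for exactly one caller; all
	// others see the flag already set and get the usual "closed" error.
	if !c.closed.CompareAndSwap(false, true) {
		return fmt.Errorf("use of closed network connection")
	}
	close(c.stopCh) // signal sub-listener goroutines to stop
	return nil
}

func main() {
	c := newCloser()
	fmt.Println(c.Close()) // <nil>
	fmt.Println(c.Close()) // use of closed network connection
}

Compared with checking stopCh in a select, the atomic swap also removes the window in which two concurrent Close calls could both observe the channel as open and both attempt close(stopCh), which would panic.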
diff --git a/vendor/modules.txt b/vendor/modules.txt index 24e90351504a..70cef1adf59f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2698,7 +2698,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250716113245-b94367cabf3e +# k8s.io/api v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -2760,7 +2760,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250716113245-b94367cabf3e +# k8s.io/apiextensions-apiserver v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -2810,7 +2810,7 @@ k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures -# k8s.io/apimachinery v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250716113245-b94367cabf3e +# k8s.io/apimachinery v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -2889,7 +2889,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250716113245-b94367cabf3e +# k8s.io/apiserver v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -3076,13 +3076,13 @@ k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/cli-runtime v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250716113245-b94367cabf3e +# k8s.io/cli-runtime v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250716113245-b94367cabf3e +# k8s.io/client-go v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -3451,7 +3451,7 @@ k8s.io/client-go/util/retry k8s.io/client-go/util/testing k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.31.1 => 
github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250716113245-b94367cabf3e +# k8s.io/cloud-provider v0.31.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -3470,13 +3470,13 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250716113245-b94367cabf3e +# k8s.io/cluster-bootstrap v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/component-base v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250716113245-b94367cabf3e +# k8s.io/component-base v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag @@ -3512,7 +3512,7 @@ k8s.io/component-base/zpages/features k8s.io/component-base/zpages/flagz k8s.io/component-base/zpages/httputil k8s.io/component-base/zpages/statusz -# k8s.io/component-helpers v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250716113245-b94367cabf3e +# k8s.io/component-helpers v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/apps/poddisruptionbudget @@ -3526,7 +3526,7 @@ k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/ephemeral k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.32.1 => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250716113245-b94367cabf3e +# k8s.io/controller-manager v0.32.1 => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 @@ -3538,22 +3538,22 @@ k8s.io/controller-manager/pkg/features k8s.io/controller-manager/pkg/features/register k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/options -# k8s.io/cri-api v0.27.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250716113245-b94367cabf3e +# k8s.io/cri-api v0.27.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1 k8s.io/cri-api/pkg/errors -# k8s.io/cri-client v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250716113245-b94367cabf3e +# k8s.io/cri-client v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/cri-client/pkg k8s.io/cri-client/pkg/internal k8s.io/cri-client/pkg/logs k8s.io/cri-client/pkg/util -# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250716113245-b94367cabf3e 
+# k8s.io/csi-translation-lib v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins -# k8s.io/dynamic-resource-allocation v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250716113245-b94367cabf3e +# k8s.io/dynamic-resource-allocation v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/dynamic-resource-allocation/api k8s.io/dynamic-resource-allocation/cel @@ -3563,14 +3563,14 @@ k8s.io/dynamic-resource-allocation/resourceclaim k8s.io/dynamic-resource-allocation/resourceslice k8s.io/dynamic-resource-allocation/resourceslice/tracker k8s.io/dynamic-resource-allocation/structured -# k8s.io/endpointslice v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250716113245-b94367cabf3e +# k8s.io/endpointslice v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/endpointslice k8s.io/endpointslice/metrics k8s.io/endpointslice/topologycache k8s.io/endpointslice/trafficdist k8s.io/endpointslice/util -# k8s.io/externaljwt v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250716113245-b94367cabf3e +# k8s.io/externaljwt v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/externaljwt/apis/v1alpha1 # k8s.io/klog v1.0.0 @@ -3594,7 +3594,7 @@ k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-aggregator v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250716113245-b94367cabf3e +# k8s.io/kube-aggregator v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install @@ -3654,11 +3654,11 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250716113245-b94367cabf3e +# k8s.io/kube-scheduler v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/kube-scheduler/config/v1 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250716113245-b94367cabf3e +# k8s.io/kubectl v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/apiresources @@ -3688,7 +3688,7 @@ k8s.io/kubectl/pkg/util/storage k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubelet v0.31.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250716113245-b94367cabf3e +# k8s.io/kubelet v0.31.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/kubelet/config/v1 k8s.io/kubelet/config/v1alpha1 @@ -3710,7 +3710,7 @@ 
 k8s.io/kubelet/pkg/cri/streaming
 k8s.io/kubelet/pkg/cri/streaming/portforward
 k8s.io/kubelet/pkg/cri/streaming/remotecommand
 k8s.io/kubelet/pkg/types
-# k8s.io/kubernetes v1.33.2 => github.com/openshift/kubernetes v1.30.1-0.20250815165952-eba09d2066a6
+# k8s.io/kubernetes v1.33.4 => github.com/openshift/kubernetes v1.30.1-0.20250906192346-6efb6a95323f
 ## explicit; go 1.24.0
 k8s.io/kubernetes/cmd/kube-apiserver/app
 k8s.io/kubernetes/cmd/kube-apiserver/app/options
@@ -4544,10 +4544,10 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
 k8s.io/kubernetes/third_party/forked/libcontainer/apparmor
 k8s.io/kubernetes/third_party/forked/libcontainer/utils
-# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250716113245-b94367cabf3e
+# k8s.io/mount-utils v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250906192346-6efb6a95323f
 ## explicit; go 1.24.0
 k8s.io/mount-utils
-# k8s.io/pod-security-admission v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250716113245-b94367cabf3e
+# k8s.io/pod-security-admission v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250906192346-6efb6a95323f
 ## explicit; go 1.24.0
 k8s.io/pod-security-admission/admission
 k8s.io/pod-security-admission/admission/api
@@ -4560,7 +4560,7 @@ k8s.io/pod-security-admission/admission/api/validation
 k8s.io/pod-security-admission/api
 k8s.io/pod-security-admission/metrics
 k8s.io/pod-security-admission/policy
-# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250716113245-b94367cabf3e
+# k8s.io/sample-apiserver v0.0.0 => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250906192346-6efb6a95323f
 ## explicit; go 1.24.0
 k8s.io/sample-apiserver/pkg/apis/wardle
 k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
@@ -4568,7 +4568,7 @@ k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1
 k8s.io/sample-apiserver/pkg/generated/applyconfiguration/wardle/v1alpha1
 k8s.io/sample-apiserver/pkg/generated/clientset/versioned/scheme
 k8s.io/sample-apiserver/pkg/generated/clientset/versioned/typed/wardle/v1alpha1
-# k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
+# k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d
 ## explicit; go 1.18
 k8s.io/utils/buffer
 k8s.io/utils/clock
@@ -4864,34 +4864,34 @@ sigs.k8s.io/yaml/goyaml.v2
 sigs.k8s.io/yaml/goyaml.v3
 # github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20250416174521-4eb003743b54
 # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
-# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/cri-client => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/dynamic-resource-allocation => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/endpointslice => github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/externaljwt => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20250815165952-eba09d2066a6
-# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250716113245-b94367cabf3e
-# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250716113245-b94367cabf3e
+# k8s.io/api => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/apiextensions-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/apimachinery => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/cli-runtime => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/cluster-bootstrap => github.com/openshift/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/code-generator => github.com/openshift/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/component-base => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/component-helpers => github.com/openshift/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/cri-api => github.com/openshift/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/cri-client => github.com/openshift/kubernetes/staging/src/k8s.io/cri-client v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/csi-translation-lib => github.com/openshift/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/dynamic-resource-allocation => github.com/openshift/kubernetes/staging/src/k8s.io/dynamic-resource-allocation v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/endpointslice => github.com/openshift/kubernetes/staging/src/k8s.io/endpointslice v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/externaljwt => github.com/openshift/kubernetes/staging/src/k8s.io/externaljwt v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/kube-controller-manager => github.com/openshift/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/kube-proxy => github.com/openshift/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20250906192346-6efb6a95323f
+# k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250906192346-6efb6a95323f
+# k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250906192346-6efb6a95323f
diff --git a/zz_generated.manifests/test-reporting.yaml b/zz_generated.manifests/test-reporting.yaml
index 049e32d8c3bc..2d9a2051dcd0 100644
--- a/zz_generated.manifests/test-reporting.yaml
+++ b/zz_generated.manifests/test-reporting.yaml
@@ -1351,6 +1351,16 @@ spec:
       [Feature:SELinux] should pass SELinux mount option for RWO volume with SELinuxMount
       enabled and nil policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
       [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped
+      on a privileged and unprivileged Pod with given SELinux with MountOption policy
+      [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
+      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped
+      on an unprivileged and privileged Pod with given SELinux with MountOption
+      policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
+      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped
       on two Pods with MountOption policy and a different context on RWOP volume
@@ -1387,6 +1397,11 @@ spec:
       on two Pods with a different policy on RWO volume and SELinuxMount enabled
       (nil + Recursive) [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
       [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
+      on a privileged and unprivileged Pod with given SELinux context and recursive
+      policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
+      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
       on two Pods with Recursive policy and a different context on RWX volume [FeatureGate:SELinuxMountReadWriteOncePod]
@@ -1394,17 +1409,27 @@ spec:
       [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
-      on two Pods with a different policy RWX volume (MountOption + MountOption)
-      [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
-      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+      on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod]
+      [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
+      [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
-      on two Pods with a different policy RWX volume (nil + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
+      on two Pods with the same policy RWX volume (MountOption + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
       [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
       [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
-      on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod]
+      on two Pods with the same policy RWX volume (nil + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
+      [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
+      [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
+      on two privileged Pods with mount policy RWO volume [FeatureGate:SELinuxMountReadWriteOncePod]
+      [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
+      [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
+      on two privileged Pods with recursive policy RWO volume [FeatureGate:SELinuxMountReadWriteOncePod]
       [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
       [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
@@ -1456,6 +1481,16 @@ spec:
       [Feature:SELinux] should unstage RWO volume when starting a second pod with
       different policy (Recursive -> MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
       [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped
+      on a privileged and unprivileged Pod with given SELinux with MountOption policy
+      [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
+      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped
+      on an unprivileged and privileged Pod with given SELinux with MountOption
+      policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
+      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped
       on two Pods with MountOption policy and a different context on RWOP volume
@@ -1487,6 +1522,11 @@ spec:
       on two Pods with a different policy on RWO volume and SELinuxMount enabled
       (nil + Recursive) [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
       [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
+      on a privileged and unprivileged Pod with given SELinux context and recursive
+      policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
+      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
       on two Pods with Recursive policy and a different context on RWX volume [FeatureGate:SELinuxMountReadWriteOncePod]
@@ -1494,17 +1534,27 @@ spec:
       [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
-      on two Pods with a different policy RWX volume (MountOption + MountOption)
-      [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
-      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+      on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod]
+      [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
+      [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
-      on two Pods with a different policy RWX volume (nil + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
+      on two Pods with the same policy RWX volume (MountOption + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
       [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
       [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
-      on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod]
+      on two Pods with the same policy RWX volume (nil + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
+      [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
+      [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
+      on two privileged Pods with mount policy RWO volume [FeatureGate:SELinuxMountReadWriteOncePod]
+      [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
+      [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
+      on two privileged Pods with recursive policy RWO volume [FeatureGate:SELinuxMountReadWriteOncePod]
       [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
       [Beta] [Feature:OffByDefault]'
   - featureGate: SELinuxMountReadWriteOncePod
@@ -1566,6 +1616,16 @@ spec:
     - testName: '[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly]
       [Feature:SELinux] should unstage RWOP volume when starting a second pod with
      different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped
+      on a privileged and unprivileged Pod with given SELinux with MountOption policy
+      [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
+      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped
+      on an unprivileged and privileged Pod with given SELinux with MountOption
+      policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
+      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped
       on two Pods with MountOption policy and a different context on RWOP volume
@@ -1602,6 +1662,11 @@ spec:
       on two Pods with a different policy on RWO volume and SELinuxMount enabled
       (nil + Recursive) [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
       [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
+      on a privileged and unprivileged Pod with given SELinux context and recursive
+      policy [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
+      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
       on two Pods with Recursive policy and a different context on RWX volume [FeatureGate:SELinuxMountReadWriteOncePod]
@@ -1609,17 +1674,27 @@ spec:
       [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
-      on two Pods with a different policy RWX volume (MountOption + MountOption)
-      [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxChangePolicy]
-      [Beta] [FeatureGate:SELinuxMount] [Beta] [Feature:OffByDefault]'
+      on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod]
+      [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
+      [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
-      on two Pods with a different policy RWX volume (nil + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
+      on two Pods with the same policy RWX volume (MountOption + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
       [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
       [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
       SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
-      on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod]
+      on two Pods with the same policy RWX volume (nil + MountOption) [FeatureGate:SELinuxMountReadWriteOncePod]
+      [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
+      [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
+      on two privileged Pods with mount policy RWO volume [FeatureGate:SELinuxMountReadWriteOncePod]
+      [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
+      [Beta] [Feature:OffByDefault]'
+    - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController
+      SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped
+      on two privileged Pods with recursive policy RWO volume [FeatureGate:SELinuxMountReadWriteOncePod]
       [Beta] [FeatureGate:SELinuxChangePolicy] [Beta] [FeatureGate:SELinuxMount]
       [Beta] [Feature:OffByDefault]'
     - testName: '[sig-storage] CSI Mock selinux on mount metrics and SELinuxWarningController