From 9fa4ff6f57af2d9223995bf809fa16e3ce50ee46 Mon Sep 17 00:00:00 2001 From: Knative Automation Date: Thu, 21 Aug 2025 12:13:49 -0400 Subject: [PATCH 01/13] upgrade to latest dependencies (#16035) Signed-off-by: Knative Automation --- go.sum | 15 ++++---- vendor/google.golang.org/grpc/MAINTAINERS.md | 8 ++--- .../endpointsharding/endpointsharding.go | 21 +++++++++-- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 6 ++-- vendor/google.golang.org/grpc/clientconn.go | 11 ++---- .../grpc/credentials/credentials.go | 25 ++++++++++--- .../google.golang.org/grpc/credentials/tls.go | 30 +++++++++------- vendor/google.golang.org/grpc/dialoptions.go | 2 ++ .../grpc/internal/envconfig/envconfig.go | 16 ++++++--- .../grpc/internal/internal.go | 29 --------------- .../internal/resolver/dns/dns_resolver.go | 20 +++++------ .../google.golang.org/grpc/picker_wrapper.go | 36 +++++++++---------- .../grpc/resolver/resolver.go | 5 +++ vendor/google.golang.org/grpc/server.go | 1 + vendor/google.golang.org/grpc/stats/stats.go | 20 +++++++---- vendor/google.golang.org/grpc/stream.go | 32 +++++++++++++++-- vendor/google.golang.org/grpc/version.go | 2 +- vendor/knative.dev/hack/release.sh | 3 +- vendor/modules.txt | 12 +++---- 19 files changed, 170 insertions(+), 124 deletions(-) diff --git a/go.sum b/go.sum index 2f3c1dc5e82a..0a828023bb57 100644 --- a/go.sum +++ b/go.sum @@ -507,8 +507,9 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca h1:PupagGYwj8+I4ubCxcmcBRk3VlUWtTg5huQpZR9flmE= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= google.golang.org/api v0.198.0 h1:OOH5fZatk57iN0A7tjJQzt6aPfYQ1JiWkt1yGseazks= google.golang.org/api v0.198.0/go.mod h1:/Lblzl3/Xqqk9hw/yS97TImKTUwnf1bv89v7+OagJzc= @@ -517,17 +518,17 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api 
v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c h1:qXWI/sQtv5UKboZ/zUk7h+mrf/lXORyI+n9DKDAusdg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= -google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 5d4096d46a04..df35bb9a882a 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,19 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) -- [aranjans](https://github.com/aranjans), Google LLC - [arjan-bal](https://github.com/arjan-bal), Google LLC - [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [erm-g](https://github.com/erm-g), Google LLC - [gtcooke94](https://github.com/gtcooke94), Google LLC -- [purnesh42h](https://github.com/purnesh42h), Google LLC -- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez) +- [aranjans](https://github.com/aranjans) - [canguler](https://github.com/canguler) - [cesarghali](https://github.com/cesarghali) +- [erm-g](https://github.com/erm-g) - [iamqizhao](https://github.com/iamqizhao) - [jeanbza](https://github.com/jeanbza) - [jtattermusch](https://github.com/jtattermusch) @@ -32,5 +30,7 @@ for general contribution guidelines. 
- [matt-kwong](https://github.com/matt-kwong) - [menghanl](https://github.com/menghanl) - [nicolasnoble](https://github.com/nicolasnoble) +- [purnesh42h](https://github.com/purnesh42h) - [srini100](https://github.com/srini100) - [yongni](https://github.com/yongni) +- [zasweq](https://github.com/zasweq) diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go index 0ad6bb1f2203..360db08ebc13 100644 --- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -37,6 +37,8 @@ import ( "google.golang.org/grpc/resolver" ) +var randIntN = rand.IntN + // ChildState is the balancer state of a child along with the endpoint which // identifies the child balancer. type ChildState struct { @@ -112,6 +114,21 @@ type endpointSharding struct { mu sync.Mutex } +// rotateEndpoints returns a slice of all the input endpoints rotated a random +// amount. +func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint { + les := len(es) + if les == 0 { + return es + } + r := randIntN(les) + // Make a copy to avoid mutating data beyond the end of es. + ret := make([]resolver.Endpoint, les) + copy(ret, es[r:]) + copy(ret[les-r:], es[:r]) + return ret +} + // UpdateClientConnState creates a child for new endpoints and deletes children // for endpoints that are no longer present. It also updates all the children, // and sends a single synchronous update of the childrens' aggregated state at @@ -133,7 +150,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState newChildren := resolver.NewEndpointMap[*balancerWrapper]() // Update/Create new children. - for _, endpoint := range state.ResolverState.Endpoints { + for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) { if _, ok := newChildren.Get(endpoint); ok { // Endpoint child was already created, continue to avoid duplicate // update. @@ -279,7 +296,7 @@ func (es *endpointSharding) updateState() { p := &pickerWithChildStates{ pickers: pickers, childStates: childStates, - next: uint32(rand.IntN(len(pickers))), + next: uint32(randIntN(len(pickers))), } es.cc.UpdateState(balancer.State{ ConnectivityState: aggState, diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index e62047256afb..67f315a0dbc4 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -67,21 +67,21 @@ var ( disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.disconnections", Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "disconnection", + Unit: "{disconnection}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_succeeded", Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ Name: "grpc.lb.pick_first.connection_attempts_failed", Description: "EXPERIMENTAL. 
Number of failed connection attempts.", - Unit: "attempt", + Unit: "{attempt}", Labels: []string{"grpc.target"}, Default: false, }) diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index cd3eaf8ddcbd..3f762285db71 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -208,7 +208,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) - cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.pickerWrapper = newPickerWrapper() cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) @@ -1076,13 +1076,6 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ - Ctx: ctx, - FullMethodName: method, - }) -} - func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. @@ -1831,7 +1824,7 @@ func (cc *ClientConn) initAuthority() error { } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { cc.authority = auth.OverrideAuthority(cc.parsedTarget) } else if strings.HasPrefix(endpoint, ":") { - cc.authority = "localhost" + endpoint + cc.authority = "localhost" + encodeAuthority(endpoint) } else { cc.authority = encodeAuthority(endpoint) } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index a63ab606e665..c8e337cdda07 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -96,10 +96,11 @@ func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. +// ProtocolInfo provides static information regarding transport credentials. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. + // + // Deprecated: this is unused by gRPC. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string @@ -109,7 +110,16 @@ type ProtocolInfo struct { // // Deprecated: please use Peer.AuthInfo. SecurityVersion string - // ServerName is the user-configured server name. + // ServerName is the user-configured server name. If set, this overrides + // the default :authority header used for all RPCs on the channel using the + // containing credentials, unless grpc.WithAuthority is set on the channel, + // in which case that setting will take precedence. + // + // This must be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: Users should use grpc.WithAuthority to override the authority + // on a channel instead of configuring the credentials. ServerName string } @@ -173,12 +183,17 @@ type TransportCredentials interface { // Clone makes a copy of this TransportCredentials. 
Clone() TransportCredentials // OverrideServerName specifies the value used for the following: + // // - verifying the hostname on the returned certificates // - as SNI in the client's handshake to support virtual hosting // - as the value for `:authority` header at stream creation time // - // Deprecated: use grpc.WithAuthority instead. Will be supported - // throughout 1.x. + // The provided string should be a valid `:authority` header according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2). + // + // Deprecated: this method is unused by gRPC. Users should use + // grpc.WithAuthority to override the authority on a channel instead of + // configuring the credentials. OverrideServerName(string) error } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 20f65f7bd956..8277be7d6f85 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -110,14 +110,14 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := credinternal.CloneTLSConfig(c.config) - if cfg.ServerName == "" { - serverName, _, err := net.SplitHostPort(authority) - if err != nil { - // If the authority had no host port or if the authority cannot be parsed, use it as-is. - serverName = authority - } - cfg.ServerName = serverName + + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority } + cfg.ServerName = serverName + conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { @@ -259,9 +259,11 @@ func applyDefaults(c *tls.Config) *tls.Config { // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } @@ -271,9 +273,11 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header -// field) in requests. +// +// serverNameOverride is for testing only. If set to a non empty string, it will +// override the virtual host name of authority (e.g. :authority header field) in +// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to +// override the authority of the client instead. 
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := os.ReadFile(certFile) if err != nil { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index ec0ca89ccdca..7a5ac2e7c494 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -608,6 +608,8 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header and as the server name in authentication handshake. +// This overrides all other ways of setting authority on the channel, but can be +// overridden per-call by using grpc.CallAuthority. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 2fdaed88dbd1..7e060f5ed132 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -26,26 +26,32 @@ import ( ) var ( - // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + // EnableTXTServiceConfig is set if the DNS resolver should perform TXT + // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not + // "false"). + EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true) + + // TXTErrIgnore is set if TXT errors should be ignored + // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // XDSFallbackSupport is the env variable that controls whether support for - // xDS fallback is turned on. If this is unset or is false, only the first - // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used // instead of the exiting pickfirst implementation. 
This can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 3ac798e8e60d..2699223a27f1 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -182,35 +182,6 @@ var ( // other features, including the CSDS service. NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) - // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster - // Specifier Plugin for testing purposes, regardless of the XDSRLS environment - // variable. - // - // TODO: Remove this function once the RLS env var is removed. - RegisterRLSClusterSpecifierPluginForTesting func() - - // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster - // Specifier Plugin for testing purposes. This is needed because there is no way - // to unregister the RLS Cluster Specifier Plugin after registering it solely - // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). - // - // TODO: Remove this function once the RLS env var is removed. - UnregisterRLSClusterSpecifierPluginForTesting func() - - // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing - // purposes, regardless of the RBAC environment variable. - // - // TODO: Remove this function once the RBAC env var is removed. - RegisterRBACHTTPFilterForTesting func() - - // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for - // testing purposes. This is needed because there is no way to unregister the - // HTTP Filter after registering it solely for testing purposes using - // RegisterRBACHTTPFilterForTesting(). - // - // TODO: Remove this function once the RBAC env var is removed. - UnregisterRBACHTTPFilterForTesting func() - // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index ba5c5a95d0d7..ada5251cff3e 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -132,13 +132,13 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig, } d.resolver, err = internal.NewNetResolver(target.URL.Host) @@ -181,8 +181,8 @@ type dnsResolver struct { // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). 
- wg sync.WaitGroup - disableServiceConfig bool + wg sync.WaitGroup + enableServiceConfig bool } // ResolveNow invoke an immediate resolution of the target that this @@ -346,7 +346,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { if len(srv) > 0 { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } - if !d.disableServiceConfig { + if d.enableServiceConfig { state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a2d2a798d488..aa52bfe95fd8 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -29,7 +29,6 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) @@ -48,14 +47,11 @@ type pickerGeneration struct { // actions and unblock when there's a picker update. type pickerWrapper struct { // If pickerGen holds a nil pointer, the pickerWrapper is closed. - pickerGen atomic.Pointer[pickerGeneration] - statsHandlers []stats.Handler // to record blocking picker calls + pickerGen atomic.Pointer[pickerGeneration] } -func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - pw := &pickerWrapper{ - statsHandlers: statsHandlers, - } +func newPickerWrapper() *pickerWrapper { + pw := &pickerWrapper{} pw.pickerGen.Store(&pickerGeneration{ blockingCh: make(chan struct{}), }) @@ -93,6 +89,12 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { } } +type pick struct { + transport transport.ClientTransport // the selected transport + result balancer.PickResult // the contents of the pick from the LB policy + blocked bool // set if a picker call queued for a new picker +} + // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker @@ -100,15 +102,16 @@ func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) { var ch chan struct{} var lastPickErr error + pickBlocked := false for { pg := pw.pickerGen.Load() if pg == nil { - return nil, balancer.PickResult{}, ErrClientConnClosing + return pick{}, ErrClientConnClosing } if pg.picker == nil { ch = pg.blockingCh @@ -127,9 +130,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + return pick{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + return pick{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -145,9 +148,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. // In the second case, the only way it will get to this conditional is // if there is a new picker. 
if ch != nil { - for _, sh := range pw.statsHandlers { - sh.HandleRPC(ctx, &stats.PickerUpdated{}) - } + pickBlocked = true } ch = pg.blockingCh @@ -164,7 +165,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, balancer.PickResult{}, dropError{error: err} + return pick{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -172,7 +173,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + return pick{}, status.Error(codes.Unavailable, err.Error()) } acbw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -183,9 +184,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { doneChannelzWrapper(acbw, &pickResult) - return t, pickResult, nil } - return t, pickResult, nil + return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index b84ef26d46d1..8e6af9514b6d 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -332,6 +332,11 @@ type AuthorityOverrider interface { // OverrideAuthority returns the authority to use for a ClientConn with the // given target. The implementation must generate it without blocking, // typically in line, and must keep it unchanged. + // + // The returned string must be a valid ":authority" header value, i.e. be + // encoded according to + // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as + // necessary. OverrideAuthority(Target) string } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 70fe23f55022..1da2a542acde 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -1598,6 +1598,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), + desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index baf7740efba9..10bf998aa5be 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -64,15 +64,21 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} -// PickerUpdated indicates that the LB policy provided a new picker while the -// RPC was waiting for one. -type PickerUpdated struct{} +// DelayedPickComplete indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +type DelayedPickComplete struct{} -// IsClient indicates if the stats information is from client side. Only Client -// Side interfaces with a Picker, thus always returns true. 
-func (*PickerUpdated) IsClient() bool { return true } +// IsClient indicates DelayedPickComplete is available on the client. +func (*DelayedPickComplete) IsClient() bool { return true } -func (*PickerUpdated) isRPCStats() {} +func (*DelayedPickComplete) isRPCStats() {} + +// PickerUpdated indicates that the RPC is unblocked following a delay in +// selecting a connection for the call. +// +// Deprecated: will be removed in a future release; use DelayedPickComplete +// instead. +type PickerUpdated = DelayedPickComplete // InPayload contains stats about an incoming payload. type InPayload struct { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index ca6948926f93..d9bbd4c57cf6 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -469,8 +469,9 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) func (a *csAttempt) getTransport() error { cs := a.cs - var err error - a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method} + pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo) + a.transport, a.pickResult = pick.transport, pick.result if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -481,6 +482,11 @@ func (a *csAttempt) getTransport() error { if a.trInfo != nil { a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } + if pick.blocked { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) + } + } return nil } @@ -1580,6 +1586,7 @@ type serverStream struct { s *transport.ServerStream p *parser codec baseCodec + desc *StreamDesc compressorV0 Compressor compressorV1 encoding.Compressor @@ -1588,6 +1595,8 @@ type serverStream struct { sendCompressorName string + recvFirstMsg bool // set after the first message is received + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1774,6 +1783,10 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, chc) } } + // Received no request msg for non-client streaming rpcs. + if !ss.desc.ClientStreams && !ss.recvFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC") + } return err } if err == io.ErrUnexpectedEOF { @@ -1781,6 +1794,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } return toRPCErr(err) } + ss.recvFirstMsg = true if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ @@ -1800,7 +1814,19 @@ func (ss *serverStream) RecvMsg(m any) (err error) { binlog.Log(ss.ctx, cm) } } - return nil + + if ss.desc.ClientStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-client-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. + if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + return nil + } else if err != nil { + return err + } + return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC") } // MethodFromServerStream returns the method string for the input stream. 
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 8b0e5f973d6d..bc1eb290f690 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.74.2" +const Version = "1.75.0" diff --git a/vendor/knative.dev/hack/release.sh b/vendor/knative.dev/hack/release.sh index f56ceb5cf74b..961c06584f8c 100644 --- a/vendor/knative.dev/hack/release.sh +++ b/vendor/knative.dev/hack/release.sh @@ -219,10 +219,9 @@ function prepare_dot_release() { # Use the original tag (ie. potentially with a knative- prefix) when determining the last version commit sha local github_tag="$(gh_tool release list --json tagName --jq '.[].tagName' | grep "${last_version}")" local last_release_commit="$(git rev-list -n 1 "${github_tag}")" - local last_release_commit_filtered="$(git rev-list --invert-grep --grep '^(?!\s*>).*?\[skip-dot-release\]' -n 1 "${github_tag}")" + local last_release_commit_filtered="$(git rev-list --invert-grep --grep '^(?!\s*>).*?\[skip-dot-release\]'-n 1 "${github_tag}")" local release_branch_commit="$(git rev-list -n 1 upstream/"${RELEASE_BRANCH}")" local release_branch_commit_filtered="$(git rev-list --invert-grep --grep '^(?!\s*>).*?\[skip-dot-release\]' -n 1 upstream/"${RELEASE_BRANCH}")" - [[ -n "${last_release_commit}" ]] || abort "cannot get last release commit" [[ -n "${release_branch_commit}" ]] || abort "cannot get release branch last commit" [[ -n "${last_release_commit_filtered}" ]] || abort "cannot get filtered last release commit" diff --git a/vendor/modules.txt b/vendor/modules.txt index 21684e58a8ac..c80a34e68253 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -766,15 +766,15 @@ google.golang.org/api/option google.golang.org/api/option/internaloption google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 +# google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.74.2 +# google.golang.org/grpc v1.75.0 ## explicit; go 1.23.0 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1450,7 +1450,7 @@ k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/trace -# knative.dev/caching v0.0.0-20250909014531-e918af7eb00b +# knative.dev/caching v0.0.0-20250821143751-b982aa0cd1c1 ## explicit; go 1.24.0 knative.dev/caching/config knative.dev/caching/pkg/apis/caching @@ -1471,10 +1471,10 @@ knative.dev/caching/pkg/client/injection/informers/caching/v1alpha1/image/fake knative.dev/caching/pkg/client/injection/informers/factory knative.dev/caching/pkg/client/injection/informers/factory/fake knative.dev/caching/pkg/client/listers/caching/v1alpha1 -# knative.dev/hack v0.0.0-20250902153942-1499de21e119 +# knative.dev/hack v0.0.0-20250819212847-f88b7db09b1c ## explicit; go 1.21 knative.dev/hack -# knative.dev/networking v0.0.0-20250909015233-e3b68fc57bea +# knative.dev/networking v0.0.0-20250821144952-042b64d7bbde ## explicit; go 
1.24.0
knative.dev/networking/config
knative.dev/networking/pkg

From 91d5b0af8b2feb459c0a34c423443a0b09777ef4 Mon Sep 17 00:00:00 2001
From: Knative Automation
Date: Tue, 2 Sep 2025 13:06:43 -0400
Subject: [PATCH 02/13] upgrade to latest dependencies (#16053)

bumping knative.dev/hack f88b7db...af735b2:
> af735b2 Fix dot releases (# 434)

Signed-off-by: Knative Automation
---
 vendor/knative.dev/hack/release.sh | 3 ++-
 vendor/modules.txt | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/vendor/knative.dev/hack/release.sh b/vendor/knative.dev/hack/release.sh
index 961c06584f8c..f56ceb5cf74b 100644
--- a/vendor/knative.dev/hack/release.sh
+++ b/vendor/knative.dev/hack/release.sh
@@ -219,9 +219,10 @@ function prepare_dot_release() {
 # Use the original tag (ie. potentially with a knative- prefix) when determining the last version commit sha
 local github_tag="$(gh_tool release list --json tagName --jq '.[].tagName' | grep "${last_version}")"
 local last_release_commit="$(git rev-list -n 1 "${github_tag}")"
- local last_release_commit_filtered="$(git rev-list --invert-grep --grep '^(?!\s*>).*?\[skip-dot-release\]'-n 1 "${github_tag}")"
+ local last_release_commit_filtered="$(git rev-list --invert-grep --grep '^(?!\s*>).*?\[skip-dot-release\]' -n 1 "${github_tag}")"
 local release_branch_commit="$(git rev-list -n 1 upstream/"${RELEASE_BRANCH}")"
 local release_branch_commit_filtered="$(git rev-list --invert-grep --grep '^(?!\s*>).*?\[skip-dot-release\]' -n 1 upstream/"${RELEASE_BRANCH}")"
+
 [[ -n "${last_release_commit}" ]] || abort "cannot get last release commit"
 [[ -n "${release_branch_commit}" ]] || abort "cannot get release branch last commit"
 [[ -n "${last_release_commit_filtered}" ]] || abort "cannot get filtered last release commit"

diff --git a/vendor/modules.txt b/vendor/modules.txt
index c80a34e68253..ff23bcfc0302 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1471,7 +1471,7 @@ knative.dev/caching/pkg/client/injection/informers/caching/v1alpha1/image/fake
 knative.dev/caching/pkg/client/injection/informers/factory
 knative.dev/caching/pkg/client/injection/informers/factory/fake
 knative.dev/caching/pkg/client/listers/caching/v1alpha1
-# knative.dev/hack v0.0.0-20250819212847-f88b7db09b1c
+# knative.dev/hack v0.0.0-20250902154142-af735b2738d6
 ## explicit; go 1.21
 knative.dev/hack
 # knative.dev/networking v0.0.0-20250821144952-042b64d7bbde

From 8829034eb759b8a1078755e1abeda0d0befaa4a0 Mon Sep 17 00:00:00 2001
From: Elijah Roussos
Date: Fri, 5 Sep 2025 11:44:22 -0500
Subject: [PATCH 03/13] fix: Ensure all connections persist until queue-proxy drain

Fixes: Websockets (and some HTTP connections) closing abruptly when the
queue-proxy drains.

Because net/http's server.Shutdown does not wait for hijacked
connections, any active websocket connections currently end as soon as
the queue-proxy calls .Shutdown. See gorilla/websocket#448 and
golang/go#17721 for details.

This patch fixes the issue by introducing an atomic counter of active
requests, incremented when a request arrives and decremented when its
handler returns. During drain, this counter must reach zero, or the
revision timeout must elapse, before .Shutdown is called.

Further, this prevents premature closing of connections in the user
container due to misconfigured SIGTERM handling, by delaying the
SIGTERM send until the queue-proxy has verified it has fully drained.
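
A minimal sketch of the mechanism, for review context. It is simplified:
withRequestCounter mirrors the handler added in
pkg/queue/sharedmain/handlers.go, while waitForPending condenses the
inline ticker loop in Main; the real code also skips Knative and kubelet
probe requests and signals completion by writing
/var/run/knative/drain-complete for the user container's PreStop hook.

    package sharedmain

    import (
    	"net/http"
    	"sync/atomic"
    	"time"
    )

    // withRequestCounter holds a +1 on the counter for the lifetime of
    // each request handler, including handlers that hijack their
    // connection (e.g. websockets), which server.Shutdown would not
    // wait for.
    func withRequestCounter(h http.Handler, pending *atomic.Int32) http.Handler {
    	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		pending.Add(1)
    		defer pending.Add(-1)
    		h.ServeHTTP(w, r)
    	})
    }

    // waitForPending blocks until every counted request has finished;
    // only then does the queue-proxy call server.Shutdown on its
    // listeners.
    func waitForPending(pending *atomic.Int32) {
    	for pending.Load() > 0 {
    		time.Sleep(time.Second)
    	}
    }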
--- .gitignore | 3 + pkg/activator/net/throttler.go | 19 +- pkg/activator/net/throttler_test.go | 20 +- pkg/autoscaler/metrics/stat.pb.go | 3 +- pkg/queue/breaker.go | 36 +- pkg/queue/breaker_test.go | 8 +- pkg/queue/request_metric.go | 2 +- pkg/queue/sharedmain/handlers.go | 32 +- pkg/queue/sharedmain/handlers_test.go | 71 ++ pkg/queue/sharedmain/main.go | 33 +- pkg/reconciler/revision/resources/deploy.go | 70 +- .../resources/deploy_lifecycle_test.go | 95 ++ .../revision/resources/deploy_test.go | 133 ++- pkg/reconciler/revision/resources/queue.go | 15 + .../revision/resources/queue_test.go | 812 +++++++++--------- pkg/webhook/podspec_dryrun.go | 10 + test/e2e/websocket_test.go | 44 + test/prober.go | 2 +- test/test_images/grpc-ping/proto/ping.pb.go | 40 +- 19 files changed, 945 insertions(+), 503 deletions(-) create mode 100644 pkg/queue/sharedmain/handlers_test.go create mode 100644 pkg/reconciler/revision/resources/deploy_lifecycle_test.go diff --git a/.gitignore b/.gitignore index 85baa82ae0b0..bb32477feda6 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,6 @@ # Temporary output of build tools bazel-* *.out + +# Repomix outputs +repomix*.xml \ No newline at end of file diff --git a/pkg/activator/net/throttler.go b/pkg/activator/net/throttler.go index 0ef298c48db4..daf768ee0ce3 100644 --- a/pkg/activator/net/throttler.go +++ b/pkg/activator/net/throttler.go @@ -100,7 +100,13 @@ func (p *podTracker) Capacity() int { if p.b == nil { return 1 } - return p.b.Capacity() + capacity := p.b.Capacity() + // Safe conversion: breaker capacity is always reasonable for int + // Check for overflow before conversion + if capacity > 0x7FFFFFFF { + return 0x7FFFFFFF // Return max int32 value + } + return int(capacity) } func (p *podTracker) UpdateConcurrency(c int) { @@ -118,7 +124,7 @@ func (p *podTracker) Reserve(ctx context.Context) (func(), bool) { } type breaker interface { - Capacity() int + Capacity() uint64 Maybe(ctx context.Context, thunk func()) error UpdateConcurrency(int) Reserve(ctx context.Context) (func(), bool) @@ -721,8 +727,13 @@ func newInfiniteBreaker(logger *zap.SugaredLogger) *infiniteBreaker { } // Capacity returns the current capacity of the breaker -func (ib *infiniteBreaker) Capacity() int { - return int(ib.concurrency.Load()) +func (ib *infiniteBreaker) Capacity() uint64 { + concurrency := ib.concurrency.Load() + // Safe conversion: concurrency is int32 and we check for non-negative + if concurrency >= 0 { + return uint64(concurrency) + } + return 0 } func zeroOrOne(x int) int32 { diff --git a/pkg/activator/net/throttler_test.go b/pkg/activator/net/throttler_test.go index ed727a26c79b..bef964fdef61 100644 --- a/pkg/activator/net/throttler_test.go +++ b/pkg/activator/net/throttler_test.go @@ -226,7 +226,7 @@ func TestThrottlerUpdateCapacity(t *testing.T) { rt.breaker = newInfiniteBreaker(logger) } rt.updateCapacity(tt.capacity) - if got := rt.breaker.Capacity(); got != tt.want { + if got := rt.breaker.Capacity(); got != uint64(tt.want) { t.Errorf("Capacity = %d, want: %d", got, tt.want) } if tt.checkAssignedPod { @@ -560,7 +560,7 @@ func TestThrottlerSuccesses(t *testing.T) { rt.mux.RLock() defer rt.mux.RUnlock() if *cc != 0 { - return rt.activatorIndex.Load() != -1 && rt.breaker.Capacity() == wantCapacity && + return rt.activatorIndex.Load() != -1 && rt.breaker.Capacity() == uint64(wantCapacity) && sortedTrackers(rt.assignedTrackers), nil } // If CC=0 then verify number of backends, rather the capacity of breaker. 
@@ -638,7 +638,7 @@ func TestPodAssignmentFinite(t *testing.T) { if got, want := trackerDestSet(rt.assignedTrackers), sets.New("ip0", "ip4"); !got.Equal(want) { t.Errorf("Assigned trackers = %v, want: %v, diff: %s", got, want, cmp.Diff(want, got)) } - if got, want := rt.breaker.Capacity(), 2*42; got != want { + if got, want := rt.breaker.Capacity(), uint64(2*42); got != want { t.Errorf("TotalCapacity = %d, want: %d", got, want) } if got, want := rt.assignedTrackers[0].Capacity(), 42; got != want { @@ -657,7 +657,7 @@ func TestPodAssignmentFinite(t *testing.T) { if got, want := len(rt.assignedTrackers), 0; got != want { t.Errorf("NumAssignedTrackers = %d, want: %d", got, want) } - if got, want := rt.breaker.Capacity(), 0; got != want { + if got, want := rt.breaker.Capacity(), uint64(0); got != want { t.Errorf("TotalCapacity = %d, want: %d", got, want) } } @@ -687,7 +687,7 @@ func TestPodAssignmentInfinite(t *testing.T) { if got, want := len(rt.assignedTrackers), 3; got != want { t.Errorf("NumAssigned trackers = %d, want: %d", got, want) } - if got, want := rt.breaker.Capacity(), 1; got != want { + if got, want := rt.breaker.Capacity(), uint64(1); got != want { t.Errorf("TotalCapacity = %d, want: %d", got, want) } if got, want := rt.assignedTrackers[0].Capacity(), 1; got != want { @@ -703,7 +703,7 @@ func TestPodAssignmentInfinite(t *testing.T) { if got, want := len(rt.assignedTrackers), 0; got != want { t.Errorf("NumAssignedTrackers = %d, want: %d", got, want) } - if got, want := rt.breaker.Capacity(), 0; got != want { + if got, want := rt.breaker.Capacity(), uint64(0); got != want { t.Errorf("TotalCapacity = %d, want: %d", got, want) } } @@ -935,7 +935,7 @@ func TestInfiniteBreaker(t *testing.T) { } // Verify initial condition. - if got, want := b.Capacity(), 0; got != want { + if got, want := b.Capacity(), uint64(0); got != want { t.Errorf("Cap=%d, want: %d", got, want) } if _, ok := b.Reserve(context.Background()); ok != true { @@ -949,7 +949,7 @@ func TestInfiniteBreaker(t *testing.T) { } b.UpdateConcurrency(1) - if got, want := b.Capacity(), 1; got != want { + if got, want := b.Capacity(), uint64(1); got != want { t.Errorf("Cap=%d, want: %d", got, want) } @@ -976,7 +976,7 @@ func TestInfiniteBreaker(t *testing.T) { if err := b.Maybe(ctx, nil); err == nil { t.Error("Should have failed, but didn't") } - if got, want := b.Capacity(), 0; got != want { + if got, want := b.Capacity(), uint64(0); got != want { t.Errorf("Cap=%d, want: %d", got, want) } @@ -1212,7 +1212,7 @@ func TestAssignSlice(t *testing.T) { t.Errorf("Got=%v, want: %v; diff: %s", got, want, cmp.Diff(want, got, opt)) } - if got, want := got[0].b.Capacity(), 0; got != want { + if got, want := got[0].b.Capacity(), uint64(0); got != want { t.Errorf("Capacity for the tail pod = %d, want: %d", got, want) } }) diff --git a/pkg/autoscaler/metrics/stat.pb.go b/pkg/autoscaler/metrics/stat.pb.go index 99df6f71699d..58024c8ed1a5 100644 --- a/pkg/autoscaler/metrics/stat.pb.go +++ b/pkg/autoscaler/metrics/stat.pb.go @@ -22,10 +22,11 @@ package metrics import ( encoding_binary "encoding/binary" fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/pkg/queue/breaker.go b/pkg/queue/breaker.go index 918f57b743a5..4c774718f419 100644 --- a/pkg/queue/breaker.go +++ b/pkg/queue/breaker.go @@ -43,7 +43,7 @@ type BreakerParams struct { // executions in excess of the concurrency limit. Function call attempts // beyond the limit of the queue are failed immediately. type Breaker struct { - inFlight atomic.Int64 + pending atomic.Int64 totalSlots int64 sem *semaphore @@ -83,10 +83,10 @@ func NewBreaker(params BreakerParams) *Breaker { func (b *Breaker) tryAcquirePending() bool { // This is an atomic version of: // - // if inFlight == totalSlots { + // if pending == totalSlots { // return false // } else { - // inFlight++ + // pending++ // return true // } // @@ -96,11 +96,12 @@ func (b *Breaker) tryAcquirePending() bool { // (it fails if we're raced to it) or if we don't fulfill the condition // anymore. for { - cur := b.inFlight.Load() + cur := b.pending.Load() + // 10000 + containerConcurrency = totalSlots if cur == b.totalSlots { return false } - if b.inFlight.CompareAndSwap(cur, cur+1) { + if b.pending.CompareAndSwap(cur, cur+1) { return true } } @@ -108,7 +109,7 @@ func (b *Breaker) tryAcquirePending() bool { // releasePending releases a slot on the pending "queue". func (b *Breaker) releasePending() { - b.inFlight.Add(-1) + b.pending.Add(-1) } // Reserve reserves an execution slot in the breaker, to permit @@ -154,9 +155,9 @@ func (b *Breaker) Maybe(ctx context.Context, thunk func()) error { return nil } -// InFlight returns the number of requests currently in flight in this breaker. -func (b *Breaker) InFlight() int { - return int(b.inFlight.Load()) +// Pending returns the number of requests currently pending to this breaker. +func (b *Breaker) Pending() int { + return int(b.pending.Load()) } // UpdateConcurrency updates the maximum number of in-flight requests. @@ -165,10 +166,15 @@ func (b *Breaker) UpdateConcurrency(size int) { } // Capacity returns the number of allowed in-flight requests on this breaker. -func (b *Breaker) Capacity() int { +func (b *Breaker) Capacity() uint64 { return b.sem.Capacity() } +// InFlight returns the number of requests currently in-flight on this breaker. +func (b *Breaker) InFlight() uint64 { + return b.sem.InFlight() +} + // newSemaphore creates a semaphore with the desired initial capacity. func newSemaphore(maxCapacity, initialCapacity int) *semaphore { queue := make(chan struct{}, maxCapacity) @@ -288,9 +294,15 @@ func (s *semaphore) updateCapacity(size int) { } // Capacity is the capacity of the semaphore. -func (s *semaphore) Capacity() int { +func (s *semaphore) Capacity() uint64 { capacity, _ := unpack(s.state.Load()) - return int(capacity) //nolint:gosec // TODO(dprotaso) - capacity should be uint64 + return capacity +} + +// InFlight is the number of the inflight requests of the semaphore. 
+func (s *semaphore) InFlight() uint64 { + _, inFlight := unpack(s.state.Load()) + return inFlight } // unpack takes an uint64 and returns two uint32 (as uint64) comprised of the leftmost diff --git a/pkg/queue/breaker_test.go b/pkg/queue/breaker_test.go index 547959a1da54..c7e838f82bdc 100644 --- a/pkg/queue/breaker_test.go +++ b/pkg/queue/breaker_test.go @@ -212,12 +212,12 @@ func TestBreakerUpdateConcurrency(t *testing.T) { params := BreakerParams{QueueDepth: 1, MaxConcurrency: 1, InitialCapacity: 0} b := NewBreaker(params) b.UpdateConcurrency(1) - if got, want := b.Capacity(), 1; got != want { + if got, want := b.Capacity(), uint64(1); got != want { t.Errorf("Capacity() = %d, want: %d", got, want) } b.UpdateConcurrency(0) - if got, want := b.Capacity(), 0; got != want { + if got, want := b.Capacity(), uint64(0); got != want { t.Errorf("Capacity() = %d, want: %d", got, want) } } @@ -294,12 +294,12 @@ func TestSemaphoreRelease(t *testing.T) { func TestSemaphoreUpdateCapacity(t *testing.T) { const initialCapacity = 1 sem := newSemaphore(3, initialCapacity) - if got, want := sem.Capacity(), 1; got != want { + if got, want := sem.Capacity(), uint64(1); got != want { t.Errorf("Capacity = %d, want: %d", got, want) } sem.acquire(context.Background()) sem.updateCapacity(initialCapacity + 2) - if got, want := sem.Capacity(), 3; got != want { + if got, want := sem.Capacity(), uint64(3); got != want { t.Errorf("Capacity = %d, want: %d", got, want) } } diff --git a/pkg/queue/request_metric.go b/pkg/queue/request_metric.go index a1406d2c41ce..50c4f2063b2c 100644 --- a/pkg/queue/request_metric.go +++ b/pkg/queue/request_metric.go @@ -85,7 +85,7 @@ func (h *appRequestMetricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ startTime := h.clock.Now() if h.breaker != nil { - h.queueLen.Record(r.Context(), int64(h.breaker.InFlight())) + h.queueLen.Record(r.Context(), int64(h.breaker.Pending())) } defer func() { // Filter probe requests for revision metrics. 
diff --git a/pkg/queue/sharedmain/handlers.go b/pkg/queue/sharedmain/handlers.go index 86a1694def04..44c329bcf746 100644 --- a/pkg/queue/sharedmain/handlers.go +++ b/pkg/queue/sharedmain/handlers.go @@ -18,8 +18,11 @@ package sharedmain import ( "context" + "fmt" "net" "net/http" + "strings" + "sync/atomic" "time" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -30,6 +33,7 @@ import ( netheader "knative.dev/networking/pkg/http/header" netproxy "knative.dev/networking/pkg/http/proxy" netstats "knative.dev/networking/pkg/http/stats" + "knative.dev/pkg/network" pkghandler "knative.dev/pkg/network/handlers" "knative.dev/serving/pkg/activator" pkghttp "knative.dev/serving/pkg/http" @@ -46,6 +50,7 @@ func mainHandler( logger *zap.SugaredLogger, mp metric.MeterProvider, tp trace.TracerProvider, + pendingRequests *atomic.Int32, ) (http.Handler, *pkghandler.Drainer) { target := net.JoinHostPort("127.0.0.1", env.UserPort) tracer := tp.Tracer("knative.dev/serving/pkg/queue") @@ -73,6 +78,7 @@ func mainHandler( composedHandler = requestAppMetricsHandler(logger, composedHandler, breaker, mp) composedHandler = queue.ProxyHandler(tracer, breaker, stats, composedHandler) + composedHandler = queue.ForwardedShimHandler(composedHandler) composedHandler = handler.NewTimeoutHandler(composedHandler, "request timeout", func(r *http.Request) (time.Duration, time.Duration, time.Duration) { return timeout, responseStartTimeout, idleTimeout @@ -81,6 +87,8 @@ func mainHandler( composedHandler = queue.NewRouteTagHandler(composedHandler) composedHandler = withFullDuplex(composedHandler, env.EnableHTTPFullDuplex, logger) + composedHandler = withRequestCounter(composedHandler, pendingRequests) + drainer := &pkghandler.Drainer{ QuietPeriod: drainSleepDuration, // Add Activator probe header to the drainer so it can handle probes directly from activator @@ -105,11 +113,10 @@ func mainHandler( return !netheader.IsProbe(r) }), ) - return composedHandler, drainer } -func adminHandler(ctx context.Context, logger *zap.SugaredLogger, drainer *pkghandler.Drainer) http.Handler { +func adminHandler(ctx context.Context, logger *zap.SugaredLogger, drainer *pkghandler.Drainer, pendingRequests *atomic.Int32) http.Handler { mux := http.NewServeMux() mux.HandleFunc(queue.RequestQueueDrainPath, func(w http.ResponseWriter, r *http.Request) { logger.Info("Attached drain handler from user-container", r) @@ -130,6 +137,17 @@ func adminHandler(ctx context.Context, logger *zap.SugaredLogger, drainer *pkgha w.WriteHeader(http.StatusOK) }) + // New endpoint that returns 200 only when all requests are drained + mux.HandleFunc("/drain-complete", func(w http.ResponseWriter, r *http.Request) { + if pendingRequests.Load() <= 0 { + w.WriteHeader(http.StatusOK) + w.Write([]byte("drained")) + } else { + w.WriteHeader(http.StatusServiceUnavailable) + fmt.Fprintf(w, "pending requests: %d", pendingRequests.Load()) + } + }) + return mux } @@ -145,3 +163,13 @@ func withFullDuplex(h http.Handler, enableFullDuplex bool, logger *zap.SugaredLo h.ServeHTTP(w, r) }) } + +func withRequestCounter(h http.Handler, pendingRequests *atomic.Int32) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get(network.ProbeHeaderName) != network.ProbeHeaderValue && !strings.HasPrefix(r.Header.Get("User-Agent"), "kube-probe/") { + pendingRequests.Add(1) + defer pendingRequests.Add(-1) + } + h.ServeHTTP(w, r) + }) +} diff --git a/pkg/queue/sharedmain/handlers_test.go b/pkg/queue/sharedmain/handlers_test.go new file 
mode 100644 index 000000000000..64d0b853ee9b --- /dev/null +++ b/pkg/queue/sharedmain/handlers_test.go @@ -0,0 +1,71 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sharedmain + +import ( + "context" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + + "go.uber.org/zap" + pkghandler "knative.dev/pkg/network/handlers" +) + +func TestDrainCompleteEndpoint(t *testing.T) { + logger := zap.NewNop().Sugar() + drainer := &pkghandler.Drainer{} + + t.Run("returns 200 when no pending requests", func(t *testing.T) { + pendingRequests := atomic.Int32{} + pendingRequests.Store(0) + + handler := adminHandler(context.Background(), logger, drainer, &pendingRequests) + + req := httptest.NewRequest(http.MethodGet, "/drain-complete", nil) + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status 200, got %d", w.Code) + } + if w.Body.String() != "drained" { + t.Errorf("Expected body 'drained', got %s", w.Body.String()) + } + }) + + t.Run("returns 503 when requests are pending", func(t *testing.T) { + pendingRequests := atomic.Int32{} + pendingRequests.Store(5) + + handler := adminHandler(context.Background(), logger, drainer, &pendingRequests) + + req := httptest.NewRequest(http.MethodGet, "/drain-complete", nil) + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + if w.Code != http.StatusServiceUnavailable { + t.Errorf("Expected status 503, got %d", w.Code) + } + if w.Body.String() != "pending requests: 5" { + t.Errorf("Expected body 'pending requests: 5', got %s", w.Body.String()) + } + }) +} diff --git a/pkg/queue/sharedmain/main.go b/pkg/queue/sharedmain/main.go index 5c1b31a65ccf..af5d613c24bc 100644 --- a/pkg/queue/sharedmain/main.go +++ b/pkg/queue/sharedmain/main.go @@ -25,6 +25,7 @@ import ( "net/http" "os" "strconv" + "sync/atomic" "time" "github.com/kelseyhightower/envconfig" @@ -58,7 +59,7 @@ const ( // Duration the /wait-for-drain handler should wait before returning. // This is to give networking a little bit more time to remove the pod // from its configuration and propagate that to all loadbalancers and nodes. - drainSleepDuration = 30 * time.Second + drainSleepDuration = 15 * time.Second // certPath is the path for the server certificate mounted by queue-proxy. certPath = queue.CertDirectory + "/" + certificates.CertName @@ -157,6 +158,8 @@ func Main(opts ...Option) error { d := Defaults{ Ctx: signals.NewContext(), } + pendingRequests := atomic.Int32{} + pendingRequests.Store(0) // Parse the environment. env := config{ @@ -231,9 +234,8 @@ func Main(opts ...Option) error { // Enable TLS when certificate is mounted. 
	tlsEnabled := exists(logger, certPath) && exists(logger, keyPath)
-
-	mainHandler, drainer := mainHandler(env, d.Transport, probe, stats, logger, mp, tp)
-	adminHandler := adminHandler(d.Ctx, logger, drainer)
+	mainHandler, drainer := mainHandler(env, d.Transport, probe, stats, logger, mp, tp, &pendingRequests)
+	adminHandler := adminHandler(d.Ctx, logger, drainer, &pendingRequests)
 
 	// Enable TLS server when activator server certs are mounted.
 	// At this moment activator with TLS does not disable HTTP.
@@ -271,6 +273,9 @@ func Main(opts ...Option) error {
 
 	logger.Info("Starting queue-proxy")
 
+	// Clean up any stale drain signal file from previous runs
+	os.Remove("/var/run/knative/drain-complete")
+
 	errCh := make(chan error)
 	for name, server := range httpServers {
 		go func(name string, s *http.Server) {
@@ -304,9 +309,27 @@ func Main(opts ...Option) error {
 		return err
 	case <-d.Ctx.Done():
 		logger.Info("Received TERM signal, attempting to gracefully shutdown servers.")
-		logger.Infof("Sleeping %v to allow K8s propagation of non-ready state", drainSleepDuration)
 		drainer.Drain()
+		// Wait for active requests to complete. This is done explicitly
+		// to avoid closing any connections that have been hijacked, since
+		// net/http's `Shutdown` would close them ungracefully.
+		// See https://github.com/golang/go/issues/17721
+		ticker := time.NewTicker(1 * time.Second)
+		defer ticker.Stop()
+		logger.Infof("Drain: waiting for %d pending requests to complete", pendingRequests.Load())
+	WaitOnPendingRequests:
+		for range ticker.C {
+			if pendingRequests.Load() <= 0 {
+				logger.Infof("Drain: all pending requests completed")
+				// Write drain signal file for PreStop hooks to detect
+				if err := os.WriteFile("/var/run/knative/drain-complete", []byte(""), 0o600); err != nil {
+					logger.Errorw("Failed to write drain signal file", zap.Error(err))
+				}
+				break WaitOnPendingRequests
+			}
+		}
+
 		for name, srv := range httpServers {
 			logger.Info("Shutting down server: ", name)
 			if err := srv.Shutdown(context.Background()); err != nil {
diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go
index fff3847cb7eb..d2a4308efa5c 100644
--- a/pkg/reconciler/revision/resources/deploy.go
+++ b/pkg/reconciler/revision/resources/deploy.go
@@ -98,18 +98,17 @@ var (
 		ReadOnly:  true,
 	}
 
-	// This PreStop hook is actually calling an endpoint on the queue-proxy
-	// because of the way PreStop hooks are called by kubelet. We use this
-	// to block the user-container from exiting before the queue-proxy is ready
-	// to exit so we can guarantee that there are no more requests in flight.
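Note: the signal file written above is one half of a handshake. The deploy.go changes that follow replace the old HTTP-based PreStop hook with a generated shell loop in every user container that blocks until this file appears. A minimal Go rendering of that reader side, to make the contract explicit (the helper name is illustrative; the real hook is the shell command generated further down):

    package main

    import (
        "os"
        "time"
    )

    // waitForDrainSignal blocks until queue-proxy writes the drain signal
    // file, i.e. until all pending requests have completed. It mirrors the
    // generated shell loop:
    //   until [ -f /var/run/knative/drain-complete ]; do sleep 0.1; done
    func waitForDrainSignal(path string, interval time.Duration) {
        for {
            if _, err := os.Stat(path); err == nil {
                return
            }
            time.Sleep(interval)
        }
    }

    func main() {
        waitForDrainSignal("/var/run/knative/drain-complete", 100*time.Millisecond)
    }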
- userLifecycle = &corev1.Lifecycle{ - PreStop: &corev1.LifecycleHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt(networking.QueueAdminPort), - Path: queue.RequestQueueDrainPath, - }, + varDrainVolume = corev1.Volume{ + Name: "knative-drain-signal", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, }, } + + varDrainVolumeMount = corev1.VolumeMount{ + Name: "knative-drain-signal", + MountPath: "/var/run/knative", + } ) func addToken(tokenVolume *corev1.Volume, filename string, audience string, expiry *int64) { @@ -173,7 +172,9 @@ func makePodSpec(rev *v1.Revision, cfg *config.Config) (*corev1.PodSpec, error) return nil, fmt.Errorf("failed to create queue-proxy container: %w", err) } + // Add drain volume for signaling between containers var extraVolumes []corev1.Volume + extraVolumes = append(extraVolumes, varDrainVolume) podInfoFeature, podInfoExists := rev.Annotations[apiconfig.QueueProxyPodInfoFeatureKey] @@ -261,7 +262,7 @@ func BuildUserContainers(rev *v1.Revision) []corev1.Container { func makeContainer(container corev1.Container, rev *v1.Revision) corev1.Container { // Adding or removing an overwritten corev1.Container field here? Don't forget to // update the fieldmasks / validations in pkg/apis/serving - container.Lifecycle = userLifecycle + container.Lifecycle = buildLifecycleWithDrainWait(container.Lifecycle) container.Env = append(container.Env, getKnativeEnvVar(rev)...) // Explicitly disable stdin and tty allocation @@ -279,6 +280,9 @@ func makeContainer(container corev1.Container, rev *v1.Revision) corev1.Containe } } + // Mount the drain volume for PreStop hook to check drain signal + container.VolumeMounts = append(container.VolumeMounts, varDrainVolumeMount) + return container } @@ -294,6 +298,50 @@ func makeServingContainer(servingContainer corev1.Container, rev *v1.Revision) c return container } +// buildLifecycleWithDrainWait preserves any existing pre-stop hooks and adds the drain wait +func buildLifecycleWithDrainWait(existingLifecycle *corev1.Lifecycle) *corev1.Lifecycle { + // If there's an existing lifecycle with a pre-stop hook, preserve it + if existingLifecycle != nil && existingLifecycle.PreStop != nil { + // Convert existing pre-stop to exec command if needed + var existingCommand string + if existingLifecycle.PreStop.Exec != nil { + existingCommand = strings.Join(existingLifecycle.PreStop.Exec.Command, " ") + } else if existingLifecycle.PreStop.HTTPGet != nil { + // Convert HTTP GET to curl command + port := existingLifecycle.PreStop.HTTPGet.Port.String() + path := existingLifecycle.PreStop.HTTPGet.Path + if path == "" { + path = "/" + } + existingCommand = fmt.Sprintf("curl -f http://localhost:%s%s", port, path) + } + + // Combine: run existing hook first, then wait for drain + return &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{ + Command: []string{ + "/bin/sh", "-c", + existingCommand + "; until [ -f /var/run/knative/drain-complete ]; do sleep 0.1; done", + }, + }, + }, + } + } + + // No existing lifecycle, just add the drain wait + return &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{ + Command: []string{ + "/bin/sh", "-c", + "until [ -f /var/run/knative/drain-complete ]; do sleep 0.1; done", + }, + }, + }, + } +} + // BuildPodSpec creates a PodSpec from the given revision and containers. // cfg can be passed as nil if not within revision reconciliation context. 
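A worked example of the combination logic in buildLifecycleWithDrainWait, with hypothetical input values (corev1 and intstr are already imported by this package; the expected output matches the lifecycle tests below):

    // Given an existing HTTPGet PreStop hook on port 8080 at /shutdown,
    // buildLifecycleWithDrainWait folds it into a single exec hook that runs
    // the converted curl call first, then waits for the drain signal file.
    lc := buildLifecycleWithDrainWait(&corev1.Lifecycle{
        PreStop: &corev1.LifecycleHandler{
            HTTPGet: &corev1.HTTPGetAction{
                Port: intstr.FromInt(8080),
                Path: "/shutdown",
            },
        },
    })
    // lc.PreStop.Exec.Command is now:
    //   ["/bin/sh", "-c",
    //    "curl -f http://localhost:8080/shutdown; until [ -f /var/run/knative/drain-complete ]; do sleep 0.1; done"]

Note that the HTTP-to-exec conversion assumes curl is available in the user image and that the probe uses a numeric port; a named port would render as its name in the URL.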
func BuildPodSpec(rev *v1.Revision, containers []corev1.Container, cfg *config.Config) *corev1.PodSpec { diff --git a/pkg/reconciler/revision/resources/deploy_lifecycle_test.go b/pkg/reconciler/revision/resources/deploy_lifecycle_test.go new file mode 100644 index 000000000000..0a04e43e58e6 --- /dev/null +++ b/pkg/reconciler/revision/resources/deploy_lifecycle_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func TestBuildLifecycleWithDrainWait(t *testing.T) { + drainCommand := "until [ -f /var/run/knative/drain-complete ]; do sleep 0.1; done" + + tests := []struct { + name string + existing *corev1.Lifecycle + want []string + }{ + { + name: "no existing lifecycle", + existing: nil, + want: []string{"/bin/sh", "-c", drainCommand}, + }, + { + name: "existing exec command", + existing: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"/app/cleanup.sh"}, + }, + }, + }, + want: []string{"/bin/sh", "-c", "/app/cleanup.sh; " + drainCommand}, + }, + { + name: "existing HTTP GET", + existing: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(8080), + Path: "/shutdown", + }, + }, + }, + want: []string{"/bin/sh", "-c", "curl -f http://localhost:8080/shutdown; " + drainCommand}, + }, + { + name: "existing HTTP GET without path", + existing: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(9090), + }, + }, + }, + want: []string{"/bin/sh", "-c", "curl -f http://localhost:9090/; " + drainCommand}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := buildLifecycleWithDrainWait(tt.existing) + + if result == nil || result.PreStop == nil || result.PreStop.Exec == nil { + t.Fatal("Expected lifecycle with exec prestop handler") + } + + gotCommand := result.PreStop.Exec.Command + if len(gotCommand) != len(tt.want) { + t.Errorf("Command length mismatch: got %d, want %d", len(gotCommand), len(tt.want)) + } + + for i, cmd := range tt.want { + if i < len(gotCommand) && gotCommand[i] != cmd { + t.Errorf("Command[%d]: got %q, want %q", i, gotCommand[i], cmd) + } + } + }) + } +} diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index 897428fff91e..122e68e8de7d 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -56,10 +56,14 @@ var ( Name: servingContainerName, Image: "busybox", Ports: buildContainerPorts(v1.DefaultUserPort), - Lifecycle: userLifecycle, + Lifecycle: buildLifecycleWithDrainWait(nil), TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, Stdin: false, TTY: false, + VolumeMounts: []corev1.VolumeMount{{ + Name: "knative-drain-signal", + MountPath: "/var/run/knative", + }}, 
Env: []corev1.EnvVar{{ Name: "PORT", Value: "8080", @@ -87,7 +91,8 @@ var ( }}, }, }, - PeriodSeconds: 0, + PeriodSeconds: 1, + FailureThreshold: 1, }, SecurityContext: queueSecurityContext, Env: []corev1.EnvVar{{ @@ -143,7 +148,7 @@ var ( Value: system.Namespace(), }, { Name: "SERVING_READINESS_PROBE", - Value: fmt.Sprintf(`{"tcpSocket":{"port":%d,"host":"127.0.0.1"}}`, v1.DefaultUserPort), + Value: fmt.Sprintf(`{"tcpSocket":{"port":%d,"host":"127.0.0.1"},"failureThreshold":1}`, v1.DefaultUserPort), }, { Name: "HOST_IP", ValueFrom: &corev1.EnvVarSource{ @@ -165,11 +170,23 @@ var ( Name: "OBSERVABILITY_CONFIG", Value: `{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{}}`, }}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "knative-drain-signal", + MountPath: "/var/run/knative", + }}, } defaultPodSpec = &corev1.PodSpec{ TerminationGracePeriodSeconds: ptr.Int64(45), EnableServiceLinks: ptr.Bool(false), + Volumes: []corev1.Volume{ + { + Name: "knative-drain-signal", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, } defaultPodAntiAffinityRules = &corev1.PodAntiAffinity{ @@ -253,10 +270,14 @@ func defaultSidecarContainer(containerName string) *corev1.Container { return &corev1.Container{ Name: containerName, Image: "ubuntu", - Lifecycle: userLifecycle, + Lifecycle: buildLifecycleWithDrainWait(nil), TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, Stdin: false, TTY: false, + VolumeMounts: []corev1.VolumeMount{{ + Name: "knative-drain-signal", + MountPath: "/var/run/knative", + }}, Env: []corev1.EnvVar{{ Name: "K_REVISION", Value: "bar", @@ -304,6 +325,11 @@ func queueContainer(opts ...containerOption) corev1.Container { return container(defaultQueueContainer.DeepCopy(), opts...) 
} +// Helper to get default probe JSON with failureThreshold +func defaultProbeJSON(port int32) string { + return fmt.Sprintf(`{"tcpSocket":{"port":%d,"host":"127.0.0.1"},"failureThreshold":1}`, port) +} + func withEnvVar(name, value string) containerOption { return func(container *corev1.Container) { for i, envVar := range container.Env { @@ -420,7 +446,7 @@ func withAppendedTokenVolumes(appended []appendTokenVolume) podSpecOption { Audience: a.audience, }, } - tokenVolume.VolumeSource.Projected.Sources = append(tokenVolume.VolumeSource.Projected.Sources, *token) + tokenVolume.Projected.Sources = append(tokenVolume.Projected.Sources, *token) } ps.Volumes = append(ps.Volumes, *tokenVolume) } @@ -448,8 +474,8 @@ func appsv1deployment(opts ...deploymentOption) *appsv1.Deployment { func revision(name, ns string, opts ...RevisionOption) *v1.Revision { revision := defaultRevision() - revision.ObjectMeta.Name = name - revision.ObjectMeta.Namespace = ns + revision.Name = name + revision.Namespace = ns for _, option := range opts { option(revision) } @@ -475,7 +501,7 @@ func withoutLabels(revision *v1.Revision) { func withOwnerReference(name string) RevisionOption { return func(revision *v1.Revision) { - revision.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ + revision.OwnerReferences = []metav1.OwnerReference{{ APIVersion: v1.SchemeGroupVersion.String(), Kind: "Configuration", Name: name, @@ -582,7 +608,7 @@ func TestMakePodSpec(t *testing.T) { ), queueContainer( withEnvVar("USER_PORT", "8888"), - withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"},"failureThreshold":1}`), ), }), }, { @@ -629,7 +655,7 @@ func TestMakePodSpec(t *testing.T) { ), queueContainer( withEnvVar("USER_PORT", "8888"), - withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"},"failureThreshold":1}`), ), }, withPrependedVolumes(corev1.Volume{ Name: "asdf", @@ -656,7 +682,9 @@ func TestMakePodSpec(t *testing.T) { servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }, func(p *corev1.PodSpec) { p.EnableServiceLinks = ptr.Bool(true) }), @@ -683,7 +711,9 @@ func TestMakePodSpec(t *testing.T) { servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }, func(p *corev1.PodSpec) { p.EnableServiceLinks = nil }), @@ -789,7 +819,9 @@ func TestMakePodSpec(t *testing.T) { servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }, ), }, { @@ -840,7 +872,9 @@ func TestMakePodSpec(t *testing.T) { servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }, ), }, { @@ -863,7 +897,9 @@ func TestMakePodSpec(t *testing.T) { servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), - 
queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }, ), }, { @@ -930,7 +966,7 @@ func TestMakePodSpec(t *testing.T) { container.Image = "busybox@sha256:deadbeef" }), queueContainer( - withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"},"failureThreshold":1}`), ), }), }, { @@ -951,7 +987,7 @@ func TestMakePodSpec(t *testing.T) { container.Image = "busybox@sha256:deadbeef" }), queueContainer( - withEnvVar("SERVING_READINESS_PROBE", `{"grpc":{"port":8080,"service":null}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"grpc":{"port":8080,"service":null},"failureThreshold":1}`), ), }), }, { @@ -972,7 +1008,7 @@ func TestMakePodSpec(t *testing.T) { container.Image = "busybox@sha256:deadbeef" }), queueContainer( - withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":12345,"host":"127.0.0.1"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":12345,"host":"127.0.0.1"},"failureThreshold":1}`), ), }), }, { @@ -995,7 +1031,7 @@ func TestMakePodSpec(t *testing.T) { container.ReadinessProbe = withExecReadinessProbe([]string{"echo", "hello"}) }), queueContainer( - withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"},"failureThreshold":1}`), ), }), }, { @@ -1030,7 +1066,9 @@ func TestMakePodSpec(t *testing.T) { }, }), ), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }), }, { name: "with tcp liveness probe", @@ -1062,7 +1100,9 @@ func TestMakePodSpec(t *testing.T) { }, }), ), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }), }, { name: "with HTTP startup probe", @@ -1095,7 +1135,9 @@ func TestMakePodSpec(t *testing.T) { }, }), ), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }), }, { name: "with TCP startup probe", @@ -1125,7 +1167,9 @@ func TestMakePodSpec(t *testing.T) { TCPSocket: &corev1.TCPSocketAction{}, }), ), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }), }, { name: "complex pod spec", @@ -1158,6 +1202,7 @@ func TestMakePodSpec(t *testing.T) { ), queueContainer( withEnvVar("SERVING_SERVICE", "svc"), + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), ), }), }, { @@ -1276,7 +1321,7 @@ func TestMakePodSpec(t *testing.T) { queueContainer( withEnvVar("SERVING_SERVICE", "svc"), withEnvVar("USER_PORT", "8888"), - withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8888,"host":"127.0.0.1"},"failureThreshold":1}`), ), }), }, { @@ -1302,7 +1347,7 @@ func TestMakePodSpec(t *testing.T) { ), queueContainer( withEnvVar("USER_PORT", "8080"), - withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"},"failureThreshold":1}`), ), }, func(p *corev1.PodSpec) { @@ -1334,11 +1379,11 @@ func TestMakePodSpec(t *testing.T) { []corev1.Container{ servingContainer(func(container *corev1.Container) { 
container.Image = "busybox@sha256:deadbeef" - container.VolumeMounts = []corev1.VolumeMount{{ + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ Name: varLogVolume.Name, MountPath: "/var/log", SubPathExpr: "$(K_INTERNAL_POD_NAMESPACE)_$(K_INTERNAL_POD_NAME)_" + servingContainerName, - }} + }) container.Env = append(container.Env, corev1.EnvVar{ Name: "K_INTERNAL_POD_NAME", @@ -1351,11 +1396,11 @@ func TestMakePodSpec(t *testing.T) { }), sidecarContainer(sidecarContainerName, func(c *corev1.Container) { c.Image = "ubuntu@sha256:deadbeef" - c.VolumeMounts = []corev1.VolumeMount{{ + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ Name: varLogVolume.Name, MountPath: "/var/log", SubPathExpr: "$(K_INTERNAL_POD_NAMESPACE)_$(K_INTERNAL_POD_NAME)_" + sidecarContainerName, - }} + }) c.Env = append(c.Env, corev1.EnvVar{ Name: "K_INTERNAL_POD_NAME", @@ -1367,7 +1412,7 @@ func TestMakePodSpec(t *testing.T) { }) }), queueContainer( - withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"tcpSocket":{"port":8080,"host":"127.0.0.1"},"failureThreshold":1}`), withEnvVar("OBSERVABILITY_CONFIG", `{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{},"EnableVarLogCollection":true}`), ), }, @@ -1416,10 +1461,10 @@ func TestMakePodSpec(t *testing.T) { container.Image = "busybox@sha256:deadbeef" }), queueContainer(func(container *corev1.Container) { - container.VolumeMounts = []corev1.VolumeMount{{ + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ Name: varTokenVolume.Name, MountPath: "/var/run/secrets/tokens", - }} + }) }), }, withAppendedTokenVolumes([]appendTokenVolume{{filename: "boo-srv", audience: "boo-srv", expires: 3600}}), @@ -1486,7 +1531,7 @@ func TestMakePodSpec(t *testing.T) { ), queueContainer( withEnvVar("ENABLE_MULTI_CONTAINER_PROBES", "true"), - withEnvVar("SERVING_READINESS_PROBE", `[{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"}},{"httpGet":{"path":"/","port":8090,"host":"127.0.0.1","scheme":"HTTP"}}]`), + withEnvVar("SERVING_READINESS_PROBE", `[{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"},"failureThreshold":1},{"httpGet":{"path":"/","port":8090,"host":"127.0.0.1","scheme":"HTTP"},"failureThreshold":1}]`), ), }), }, { @@ -1525,7 +1570,7 @@ func TestMakePodSpec(t *testing.T) { ), queueContainer( withEnvVar("ENABLE_MULTI_CONTAINER_PROBES", "true"), - withEnvVar("SERVING_READINESS_PROBE", `[{"tcpSocket":{"port":8080,"host":"127.0.0.1"}}]`), + withEnvVar("SERVING_READINESS_PROBE", `[{"tcpSocket":{"port":8080,"host":"127.0.0.1"},"failureThreshold":1}]`), ), }), }, { @@ -1551,7 +1596,9 @@ func TestMakePodSpec(t *testing.T) { servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }, func(p *corev1.PodSpec) { p.Affinity = &corev1.Affinity{ @@ -1582,7 +1629,9 @@ func TestMakePodSpec(t *testing.T) { servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), - queueContainer(), + queueContainer( + withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }, ), }, { @@ -1612,7 +1661,9 @@ func TestMakePodSpec(t *testing.T) { servingContainer(func(container *corev1.Container) { container.Image = "busybox@sha256:deadbeef" }), - queueContainer(), + queueContainer( + 
withEnvVar("SERVING_READINESS_PROBE", defaultProbeJSON(v1.DefaultUserPort)), + ), }, func(p *corev1.PodSpec) { p.Affinity = &corev1.Affinity{ @@ -1640,7 +1691,7 @@ func TestMakePodSpec(t *testing.T) { container.Image = "busybox" }), queueContainer( - withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"},"failureThreshold":1}`), ), }, withRuntimeClass("gvisor")), }, { @@ -1667,7 +1718,7 @@ func TestMakePodSpec(t *testing.T) { container.Image = "busybox" }), queueContainer( - withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"},"failureThreshold":1}`), ), }), }, { @@ -1695,7 +1746,7 @@ func TestMakePodSpec(t *testing.T) { container.Image = "busybox" }), queueContainer( - withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"},"failureThreshold":1}`), ), }, withRuntimeClass("gvisor")), }, { @@ -1724,7 +1775,7 @@ func TestMakePodSpec(t *testing.T) { container.Image = "busybox" }), queueContainer( - withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"}}`), + withEnvVar("SERVING_READINESS_PROBE", `{"httpGet":{"path":"/","port":8080,"host":"127.0.0.1","scheme":"HTTP"},"failureThreshold":1}`), ), }, withRuntimeClass("kata")), }} diff --git a/pkg/reconciler/revision/resources/queue.go b/pkg/reconciler/revision/resources/queue.go index a5754f8e4a32..cf05b83eaf40 100644 --- a/pkg/reconciler/revision/resources/queue.go +++ b/pkg/reconciler/revision/resources/queue.go @@ -297,6 +297,13 @@ func makeQueueContainer(rev *v1.Revision, cfg *config.Config) (*corev1.Container }}, }, } + // Make queue proxy readiness probe more aggressive only if not user-defined + if queueProxyReadinessProbe.PeriodSeconds == 0 { + queueProxyReadinessProbe.PeriodSeconds = 1 + } + if queueProxyReadinessProbe.FailureThreshold == 0 { + queueProxyReadinessProbe.FailureThreshold = 1 + } } // Sidecar readiness probes @@ -439,6 +446,10 @@ func makeQueueContainer(rev *v1.Revision, cfg *config.Config) (*corev1.Container Name: "OBSERVABILITY_CONFIG", Value: string(o11yConfig), }}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "knative-drain-signal", + MountPath: "/var/run/knative", + }}, } return c, nil @@ -470,6 +481,10 @@ func applyReadinessProbeDefaults(p *corev1.Probe, port int32) { p.GRPC.Port = port } + // Set aggressive defaults for faster failure detection + if p.FailureThreshold == 0 { + p.FailureThreshold = 1 // Mark unready immediately on failure + } if p.PeriodSeconds > 0 && p.TimeoutSeconds < 1 { p.TimeoutSeconds = 1 } diff --git a/pkg/reconciler/revision/resources/queue_test.go b/pkg/reconciler/revision/resources/queue_test.go index 1f7f05a4bc61..e30e350c896c 100644 --- a/pkg/reconciler/revision/resources/queue_test.go +++ b/pkg/reconciler/revision/resources/queue_test.go @@ -78,7 +78,7 @@ var ( defaults, _ = apicfg.NewDefaultsConfigFromMap(nil) ) -const testProbeJSONTemplate = `{"tcpSocket":{"port":%d,"host":"127.0.0.1"}}` +const testProbeJSONTemplate = `{"tcpSocket":{"port":%d,"host":"127.0.0.1"},"failureThreshold":1}` func TestMakeQueueContainer(t *testing.T) 
{ tests := []struct { @@ -90,409 +90,411 @@ func TestMakeQueueContainer(t *testing.T) { dc deployment.Config fc apicfg.Features want corev1.Container - }{{ - name: "autoscaler single", - rev: revision("bar", "foo", - withContainers(containers), - withContainerConcurrency(1)), - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "CONTAINER_CONCURRENCY": "1", - }) - }), - }, { - name: "custom readiness probe port", - rev: revision("bar", "foo", - withContainers([]corev1.Container{{ - Name: servingContainerName, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - TCPSocket: &corev1.TCPSocketAction{ - Host: "127.0.0.1", - Port: intstr.FromInt(8087), + }{ + { + name: "autoscaler single", + rev: revision("bar", "foo", + withContainers(containers), + withContainerConcurrency(1)), + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "CONTAINER_CONCURRENCY": "1", + }) + }), + }, { + name: "custom readiness probe port", + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Host: "127.0.0.1", + Port: intstr.FromInt(8087), + }, }, }, + Ports: []corev1.ContainerPort{{ + ContainerPort: 1955, + Name: string(netapi.ProtocolH2C), + }}, + }})), + dc: deployment.Config{ + QueueSidecarImage: "alpine", + }, + want: queueContainer(func(c *corev1.Container) { + c.Image = "alpine" + c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) + c.ReadinessProbe.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort + c.Env = env(map[string]string{ + "USER_PORT": "1955", + "QUEUE_SERVING_PORT": "8013", + }) + }), + }, { + name: "custom sidecar image, container port, protocol", + rev: revision("bar", "foo", + withContainers([]corev1.Container{{ + Name: servingContainerName, + ReadinessProbe: testProbe, + Ports: []corev1.ContainerPort{{ + ContainerPort: 1955, + Name: string(netapi.ProtocolH2C), + }}, + }})), + dc: deployment.Config{ + QueueSidecarImage: "alpine", + }, + want: queueContainer(func(c *corev1.Container) { + c.Image = "alpine" + c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) + c.ReadinessProbe.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort + c.Env = env(map[string]string{ + "USER_PORT": "1955", + "QUEUE_SERVING_PORT": "8013", + }) + }), + }, { + name: "service name in labels", + rev: revision("bar", "foo", + withContainers(containers), + func(revision *v1.Revision) { + revision.Labels = map[string]string{ + serving.ServiceLabelKey: "svc", + } + }), + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "SERVING_SERVICE": "svc", + }) + }), + }, { + name: "config owner as env var, zero concurrency", + rev: revision("blah", "baz", + withContainers(containers), + withContainerConcurrency(0), + func(revision *v1.Revision) { + revision.OwnerReferences = []metav1.OwnerReference{{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: "Configuration", + Name: "the-parent-config-name", + Controller: ptr.Bool(true), + BlockOwnerDeletion: ptr.Bool(true), + }} + }), + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "CONTAINER_CONCURRENCY": "0", + "SERVING_CONFIGURATION": "the-parent-config-name", + "SERVING_NAMESPACE": "baz", + "SERVING_REVISION": "blah", + }) + }), + }, { + name: "logging configuration as env var", + rev: revision("this", "log", + withContainers(containers)), + lc: 
logging.Config{ + LoggingConfig: "The logging configuration goes here", + LoggingLevel: map[string]zapcore.Level{ + "queueproxy": zapcore.ErrorLevel, }, - Ports: []corev1.ContainerPort{{ - ContainerPort: 1955, - Name: string(netapi.ProtocolH2C), - }}, - }})), - dc: deployment.Config{ - QueueSidecarImage: "alpine", - }, - want: queueContainer(func(c *corev1.Container) { - c.Image = "alpine" - c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) - c.ReadinessProbe.ProbeHandler.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort - c.Env = env(map[string]string{ - "USER_PORT": "1955", - "QUEUE_SERVING_PORT": "8013", - }) - }), - }, { - name: "custom sidecar image, container port, protocol", - rev: revision("bar", "foo", - withContainers([]corev1.Container{{ - Name: servingContainerName, - ReadinessProbe: testProbe, - Ports: []corev1.ContainerPort{{ - ContainerPort: 1955, - Name: string(netapi.ProtocolH2C), - }}, - }})), - dc: deployment.Config{ - QueueSidecarImage: "alpine", - }, - want: queueContainer(func(c *corev1.Container) { - c.Image = "alpine" - c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) - c.ReadinessProbe.ProbeHandler.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort - c.Env = env(map[string]string{ - "USER_PORT": "1955", - "QUEUE_SERVING_PORT": "8013", - }) - }), - }, { - name: "service name in labels", - rev: revision("bar", "foo", - withContainers(containers), - func(revision *v1.Revision) { - revision.Labels = map[string]string{ - serving.ServiceLabelKey: "svc", - } + }, + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "SERVING_LOGGING_CONFIG": "The logging configuration goes here", + "SERVING_LOGGING_LEVEL": "error", + "SERVING_NAMESPACE": "log", + "SERVING_REVISION": "this", + }) }), - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "SERVING_SERVICE": "svc", - }) - }), - }, { - name: "config owner as env var, zero concurrency", - rev: revision("blah", "baz", - withContainers(containers), - withContainerConcurrency(0), - func(revision *v1.Revision) { - revision.ObjectMeta.OwnerReferences = []metav1.OwnerReference{{ - APIVersion: v1.SchemeGroupVersion.String(), - Kind: "Configuration", - Name: "the-parent-config-name", - Controller: ptr.Bool(true), - BlockOwnerDeletion: ptr.Bool(true), - }} + }, { + name: "container concurrency 10", + rev: revision("bar", "foo", + withContainers(containers), + withContainerConcurrency(10)), + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "CONTAINER_CONCURRENCY": "10", + }) }), - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "CONTAINER_CONCURRENCY": "0", - "SERVING_CONFIGURATION": "the-parent-config-name", - "SERVING_NAMESPACE": "baz", - "SERVING_REVISION": "blah", - }) - }), - }, { - name: "logging configuration as env var", - rev: revision("this", "log", - withContainers(containers)), - lc: logging.Config{ - LoggingConfig: "The logging configuration goes here", - LoggingLevel: map[string]zapcore.Level{ - "queueproxy": zapcore.ErrorLevel, + }, { + name: "request log configuration as env var", + rev: revision("bar", "foo", + withContainers(containers)), + oc: observability.Config{ + RequestLogTemplate: "test template", + EnableProbeRequestLog: true, }, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "SERVING_LOGGING_CONFIG": "The logging configuration goes here", - "SERVING_LOGGING_LEVEL": "error", - 
"SERVING_NAMESPACE": "log", - "SERVING_REVISION": "this", - }) - }), - }, { - name: "container concurrency 10", - rev: revision("bar", "foo", - withContainers(containers), - withContainerConcurrency(10)), - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "CONTAINER_CONCURRENCY": "10", - }) - }), - }, { - name: "request log configuration as env var", - rev: revision("bar", "foo", - withContainers(containers)), - oc: observability.Config{ - RequestLogTemplate: "test template", - EnableProbeRequestLog: true, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "OBSERVABILITY_CONFIG": `{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{},"requestLogTemplate":"test template","enableProbeRequestLog":true}`, - }) - }), - }, { - name: "disabled request log configuration as env var", - rev: revision("bar", "foo", - withContainers(containers)), - oc: observability.Config{ - RequestLogTemplate: "test template", - EnableProbeRequestLog: false, - EnableRequestLog: false, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "OBSERVABILITY_CONFIG": `{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{},"requestLogTemplate":"test template"}`, - }) - }), - }, { - name: "request metrics backend as env var", - rev: revision("bar", "foo", - withContainers(containers)), - oc: observability.Config{ - RequestMetrics: observability.MetricsConfig{ - Protocol: "prometheus", + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "OBSERVABILITY_CONFIG": `{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{},"requestLogTemplate":"test template","enableProbeRequestLog":true}`, + }) + }), + }, { + name: "disabled request log configuration as env var", + rev: revision("bar", "foo", + withContainers(containers)), + oc: observability.Config{ + RequestLogTemplate: "test template", + EnableProbeRequestLog: false, + EnableRequestLog: false, }, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "OBSERVABILITY_CONFIG": `{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{"protocol":"prometheus"}}`, - }) - }), - }, { - name: "enable profiling", - rev: revision("bar", "foo", - withContainers(containers)), - oc: observability.Config{ - BaseConfig: observability.BaseConfig{ - Runtime: observability.RuntimeConfig{ - Profiling: "enabled", + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "OBSERVABILITY_CONFIG": `{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{},"requestLogTemplate":"test template"}`, + }) + }), + }, { + name: "request metrics backend as env var", + rev: revision("bar", "foo", + withContainers(containers)), + oc: observability.Config{ + RequestMetrics: observability.MetricsConfig{ + Protocol: "prometheus", }, }, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "OBSERVABILITY_CONFIG": `{"tracing":{},"metrics":{},"runtime":{"profiling":"enabled"},"requestMetrics":{}}`, - }) - c.Ports = append(queueNonServingPorts, profilingPort, queueHTTPPort, queueHTTPSPort) - }), - }, { - name: "custom TimeoutSeconds", - rev: revision("bar", "foo", - withContainers(containers), - func(revision *v1.Revision) { - revision.Spec.TimeoutSeconds = ptr.Int64(99) + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "OBSERVABILITY_CONFIG": 
`{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{"protocol":"prometheus"}}`, + }) + }), + }, { + name: "enable profiling", + rev: revision("bar", "foo", + withContainers(containers)), + oc: observability.Config{ + BaseConfig: observability.BaseConfig{ + Runtime: observability.RuntimeConfig{ + Profiling: "enabled", + }, + }, }, - ), - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "REVISION_TIMEOUT_SECONDS": "99", - }) - }), - }, { - name: "custom ResponseStartTimeoutSeconds", - rev: revision("bar", "foo", - withContainers(containers), - func(revision *v1.Revision) { - revision.Spec.ResponseStartTimeoutSeconds = ptr.Int64(77) + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "OBSERVABILITY_CONFIG": `{"tracing":{},"metrics":{},"runtime":{"profiling":"enabled"},"requestMetrics":{}}`, + }) + c.Ports = append(queueNonServingPorts, profilingPort, queueHTTPPort, queueHTTPSPort) + }), + }, { + name: "custom TimeoutSeconds", + rev: revision("bar", "foo", + withContainers(containers), + func(revision *v1.Revision) { + revision.Spec.TimeoutSeconds = ptr.Int64(99) + }, + ), + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "REVISION_TIMEOUT_SECONDS": "99", + }) + }), + }, { + name: "custom ResponseStartTimeoutSeconds", + rev: revision("bar", "foo", + withContainers(containers), + func(revision *v1.Revision) { + revision.Spec.ResponseStartTimeoutSeconds = ptr.Int64(77) + }, + ), + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "REVISION_RESPONSE_START_TIMEOUT_SECONDS": "77", + }) + }), + }, { + name: "custom IdleTimeoutSeconds", + rev: revision("bar", "foo", + withContainers(containers), + func(revision *v1.Revision) { + revision.Spec.IdleTimeoutSeconds = ptr.Int64(99) + }, + ), + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "REVISION_IDLE_TIMEOUT_SECONDS": "99", + }) + }), + }, { + name: "default resource config with feature qp defaults disabled", + rev: revision("bar", "foo", + withContainers(containers)), + dc: deployment.Config{ + QueueSidecarCPURequest: &deployment.QueueSidecarCPURequestDefault, }, - ), - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "REVISION_RESPONSE_START_TIMEOUT_SECONDS": "77", - }) - }), - }, { - name: "custom IdleTimeoutSeconds", - rev: revision("bar", "foo", - withContainers(containers), - func(revision *v1.Revision) { - revision.Spec.IdleTimeoutSeconds = ptr.Int64(99) + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{}) + c.Resources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: deployment.QueueSidecarCPURequestDefault, + } + }), + }, { + name: "resource config with feature qp defaults enabled", + rev: revision("bar", "foo", + withContainers(containers)), + dc: deployment.Config{ + QueueSidecarCPURequest: &deployment.QueueSidecarCPURequestDefault, }, - ), - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "REVISION_IDLE_TIMEOUT_SECONDS": "99", - }) - }), - }, { - name: "default resource config with feature qp defaults disabled", - rev: revision("bar", "foo", - withContainers(containers)), - dc: deployment.Config{ - QueueSidecarCPURequest: &deployment.QueueSidecarCPURequestDefault, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{}) - c.Resources.Requests = corev1.ResourceList{ - corev1.ResourceCPU: 
deployment.QueueSidecarCPURequestDefault, - } - }), - }, { - name: "resource config with feature qp defaults enabled", - rev: revision("bar", "foo", - withContainers(containers)), - dc: deployment.Config{ - QueueSidecarCPURequest: &deployment.QueueSidecarCPURequestDefault, - }, - fc: apicfg.Features{ - QueueProxyResourceDefaults: apicfg.Enabled, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{}) - c.Resources.Requests = corev1.ResourceList{ - corev1.ResourceCPU: deployment.QueueSidecarCPURequestDefault, - corev1.ResourceMemory: deployment.QueueSidecarMemoryRequestDefault, - } - c.Resources.Limits = corev1.ResourceList{ - corev1.ResourceCPU: deployment.QueueSidecarCPULimitDefault, - corev1.ResourceMemory: deployment.QueueSidecarMemoryLimitDefault, - } - }), - }, { - name: "overridden resources", - rev: revision("bar", "foo", - withContainers(containers)), - dc: deployment.Config{ - QueueSidecarCPURequest: resourcePtr(resource.MustParse("123m")), - QueueSidecarEphemeralStorageRequest: resourcePtr(resource.MustParse("456M")), - QueueSidecarMemoryLimit: resourcePtr(resource.MustParse("789m")), - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{}) - c.Resources.Requests = corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("123m"), - corev1.ResourceEphemeralStorage: resource.MustParse("456M"), - } - c.Resources.Limits = corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("789m"), - } - }), - }, { - name: "collector address as env var", - rev: revision("bar", "foo", - withContainers(containers)), - oc: observability.Config{ - RequestMetrics: observability.MetricsConfig{ - Protocol: "http/protobuf", - Endpoint: "otel:55678", + fc: apicfg.Features{ + QueueProxyResourceDefaults: apicfg.Enabled, }, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "OBSERVABILITY_CONFIG": `{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{"protocol":"http/protobuf","endpoint":"otel:55678"}}`, - }) - }), - }, { - name: "HTTP2 autodetection enabled", - rev: revision("bar", "foo", - withContainers(containers)), - fc: apicfg.Features{ - AutoDetectHTTP2: apicfg.Enabled, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "ENABLE_HTTP2_AUTO_DETECTION": "true", - }) - }), - }, { - name: "HTTP1 full duplex enabled", - rev: revision("bar", "foo", - withContainers(containers), - WithRevisionAnnotations(map[string]string{apicfg.AllowHTTPFullDuplexFeatureKey: string(apicfg.Enabled)})), - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "ENABLE_HTTP_FULL_DUPLEX": "true", - }) - }), - }, { - name: "set root ca", - rev: revision("bar", "foo", - withContainers(containers)), - dc: deployment.Config{ - QueueSidecarRootCA: "xyz", - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "ROOT_CA": "xyz", - }) - }), - }, { - name: "HTTP2 autodetection disabled", - rev: revision("bar", "foo", - withContainers(containers)), - fc: apicfg.Features{ - AutoDetectHTTP2: apicfg.Disabled, - }, - dc: deployment.Config{ - ProgressDeadline: 0 * time.Second, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "ENABLE_HTTP2_AUTO_DETECTION": "false", - }) - }), - }, { - name: "multi container probing enabled", - rev: revision("bar", "foo", withContainers(containers)), - fc: apicfg.Features{ - MultiContainerProbing: apicfg.Enabled, - }, - dc: deployment.Config{ - 
ProgressDeadline: 0 * time.Second, - }, - want: queueContainer(func(c *corev1.Container) { - c.Env = env(map[string]string{ - "ENABLE_MULTI_CONTAINER_PROBES": "true", - }) - }), - }, { - name: "multi container probing enabled with exec probes on all containers", - rev: revision("bar", "foo", withContainers([]corev1.Container{ - { - Name: servingContainerName, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - Exec: &corev1.ExecAction{ - Command: []string{"bin/sh", "serving.sh"}, + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{}) + c.Resources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: deployment.QueueSidecarCPURequestDefault, + corev1.ResourceMemory: deployment.QueueSidecarMemoryRequestDefault, + } + c.Resources.Limits = corev1.ResourceList{ + corev1.ResourceCPU: deployment.QueueSidecarCPULimitDefault, + corev1.ResourceMemory: deployment.QueueSidecarMemoryLimitDefault, + } + }), + }, { + name: "overridden resources", + rev: revision("bar", "foo", + withContainers(containers)), + dc: deployment.Config{ + QueueSidecarCPURequest: resourcePtr(resource.MustParse("123m")), + QueueSidecarEphemeralStorageRequest: resourcePtr(resource.MustParse("456M")), + QueueSidecarMemoryLimit: resourcePtr(resource.MustParse("789m")), + }, + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{}) + c.Resources.Requests = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("123m"), + corev1.ResourceEphemeralStorage: resource.MustParse("456M"), + } + c.Resources.Limits = corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("789m"), + } + }), + }, { + name: "collector address as env var", + rev: revision("bar", "foo", + withContainers(containers)), + oc: observability.Config{ + RequestMetrics: observability.MetricsConfig{ + Protocol: "http/protobuf", + Endpoint: "otel:55678", + }, + }, + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "OBSERVABILITY_CONFIG": `{"tracing":{},"metrics":{},"runtime":{},"requestMetrics":{"protocol":"http/protobuf","endpoint":"otel:55678"}}`, + }) + }), + }, { + name: "HTTP2 autodetection enabled", + rev: revision("bar", "foo", + withContainers(containers)), + fc: apicfg.Features{ + AutoDetectHTTP2: apicfg.Enabled, + }, + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "ENABLE_HTTP2_AUTO_DETECTION": "true", + }) + }), + }, { + name: "HTTP1 full duplex enabled", + rev: revision("bar", "foo", + withContainers(containers), + WithRevisionAnnotations(map[string]string{apicfg.AllowHTTPFullDuplexFeatureKey: string(apicfg.Enabled)})), + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "ENABLE_HTTP_FULL_DUPLEX": "true", + }) + }), + }, { + name: "set root ca", + rev: revision("bar", "foo", + withContainers(containers)), + dc: deployment.Config{ + QueueSidecarRootCA: "xyz", + }, + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "ROOT_CA": "xyz", + }) + }), + }, { + name: "HTTP2 autodetection disabled", + rev: revision("bar", "foo", + withContainers(containers)), + fc: apicfg.Features{ + AutoDetectHTTP2: apicfg.Disabled, + }, + dc: deployment.Config{ + ProgressDeadline: 0 * time.Second, + }, + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "ENABLE_HTTP2_AUTO_DETECTION": "false", + }) + }), + }, { + name: "multi container probing enabled", + rev: revision("bar", "foo", withContainers(containers)), + fc: 
apicfg.Features{ + MultiContainerProbing: apicfg.Enabled, + }, + dc: deployment.Config{ + ProgressDeadline: 0 * time.Second, + }, + want: queueContainer(func(c *corev1.Container) { + c.Env = env(map[string]string{ + "ENABLE_MULTI_CONTAINER_PROBES": "true", + }) + }), + }, { + name: "multi container probing enabled with exec probes on all containers", + rev: revision("bar", "foo", withContainers([]corev1.Container{ + { + Name: servingContainerName, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"bin/sh", "serving.sh"}, + }, }, }, + Ports: []corev1.ContainerPort{{ + ContainerPort: 1955, + Name: string(netapi.ProtocolH2C), + }}, }, - Ports: []corev1.ContainerPort{{ - ContainerPort: 1955, - Name: string(netapi.ProtocolH2C), - }}, - }, - { - Name: sidecarContainerName, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - Exec: &corev1.ExecAction{ - Command: []string{"bin/sh", "sidecar.sh"}, + { + Name: sidecarContainerName, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"bin/sh", "sidecar.sh"}, + }, }, }, }, + })), + fc: apicfg.Features{ + MultiContainerProbing: apicfg.Enabled, }, - })), - fc: apicfg.Features{ - MultiContainerProbing: apicfg.Enabled, - }, - dc: deployment.Config{ - ProgressDeadline: 0 * time.Second, + dc: deployment.Config{ + ProgressDeadline: 0 * time.Second, + }, + want: queueContainer(func(c *corev1.Container) { + c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) + c.ReadinessProbe.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort + c.Env = env(map[string]string{ + "ENABLE_MULTI_CONTAINER_PROBES": "true", + "USER_PORT": "1955", + "QUEUE_SERVING_PORT": "8013", + }) + }), }, - want: queueContainer(func(c *corev1.Container) { - c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) - c.ReadinessProbe.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort - c.Env = env(map[string]string{ - "ENABLE_MULTI_CONTAINER_PROBES": "true", - "USER_PORT": "1955", - "QUEUE_SERVING_PORT": "8013", - }) - }), - }} + } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if len(test.rev.Spec.PodSpec.Containers) == 0 { + if len(test.rev.Spec.Containers) == 0 { test.rev.Spec.PodSpec = corev1.PodSpec{ Containers: []corev1.Container{{ Name: servingContainerName, @@ -544,7 +546,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { revision.Annotations = map[string]string{ serving.QueueSidecarResourcePercentageAnnotationKey: "20", } - revision.Spec.PodSpec.Containers = []corev1.Container{{ + revision.Spec.Containers = []corev1.Container{{ Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ @@ -569,7 +571,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { revision.Annotations = map[string]string{ serving.QueueSidecarResourcePercentageAnnotationKey: "0.2", } - revision.Spec.PodSpec.Containers = []corev1.Container{{ + revision.Spec.Containers = []corev1.Container{{ Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ @@ -594,7 +596,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { revision.Annotations = map[string]string{ serving.QueueSidecarResourcePercentageAnnotationKey: "foo", } - revision.Spec.PodSpec.Containers = []corev1.Container{{ + revision.Spec.Containers = []corev1.Container{{ Name: servingContainerName, ReadinessProbe: testProbe, 
Resources: corev1.ResourceRequirements{ @@ -621,7 +623,7 @@ func TestMakeQueueContainerWithPercentageAnnotation(t *testing.T) { revision.Annotations = map[string]string{ serving.QueueSidecarResourcePercentageAnnotationKey: "100", } - revision.Spec.PodSpec.Containers = []corev1.Container{{ + revision.Spec.Containers = []corev1.Container{{ Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ @@ -682,7 +684,7 @@ func TestMakeQueueContainerWithResourceAnnotations(t *testing.T) { serving.QueueSidecarEphemeralStorageResourceRequestAnnotationKey: "500Mi", serving.QueueSidecarEphemeralStorageResourceLimitAnnotationKey: "600Mi", } - revision.Spec.PodSpec.Containers = []corev1.Container{{ + revision.Spec.Containers = []corev1.Container{{ Name: servingContainerName, ReadinessProbe: testProbe, }} @@ -710,7 +712,7 @@ func TestMakeQueueContainerWithResourceAnnotations(t *testing.T) { serving.QueueSidecarMemoryResourceRequestAnnotationKey: "Gdx", serving.QueueSidecarMemoryResourceLimitAnnotationKey: "2Gi", } - revision.Spec.PodSpec.Containers = []corev1.Container{{ + revision.Spec.Containers = []corev1.Container{{ Name: servingContainerName, ReadinessProbe: testProbe, }} @@ -731,7 +733,7 @@ func TestMakeQueueContainerWithResourceAnnotations(t *testing.T) { serving.QueueSidecarMemoryResourceLimitAnnotationKey: "4Gi", serving.QueueSidecarResourcePercentageAnnotationKey: "50", } - revision.Spec.PodSpec.Containers = []corev1.Container{{ + revision.Spec.Containers = []corev1.Container{{ Name: servingContainerName, ReadinessProbe: testProbe, Resources: corev1.ResourceRequirements{ @@ -783,7 +785,7 @@ func TestMakeQueueContainerWithResourceAnnotations(t *testing.T) { func TestProbeGenerationHTTPDefaults(t *testing.T) { rev := revision("bar", "foo", func(revision *v1.Revision) { - revision.Spec.PodSpec.Containers = []corev1.Container{{ + revision.Spec.Containers = []corev1.Container{{ Name: servingContainerName, ReadinessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ @@ -806,8 +808,9 @@ func TestProbeGenerationHTTPDefaults(t *testing.T) { Scheme: corev1.URISchemeHTTP, }, }, - PeriodSeconds: 1, - TimeoutSeconds: 10, + PeriodSeconds: 1, + TimeoutSeconds: 10, + FailureThreshold: 1, } wantProbeJSON, err := json.Marshal(expectedProbe) @@ -829,8 +832,9 @@ func TestProbeGenerationHTTPDefaults(t *testing.T) { }}, }, }, - PeriodSeconds: 1, - TimeoutSeconds: 10, + PeriodSeconds: 1, + TimeoutSeconds: 10, + FailureThreshold: 1, } }) @@ -850,7 +854,7 @@ func TestProbeGenerationHTTP(t *testing.T) { rev := revision("bar", "foo", func(revision *v1.Revision) { - revision.Spec.PodSpec.Containers = []corev1.Container{{ + revision.Spec.Containers = []corev1.Container{{ Name: servingContainerName, Ports: []corev1.ContainerPort{{ ContainerPort: userPort, @@ -877,8 +881,9 @@ func TestProbeGenerationHTTP(t *testing.T) { Scheme: corev1.URISchemeHTTPS, }, }, - PeriodSeconds: 2, - TimeoutSeconds: 10, + PeriodSeconds: 2, + TimeoutSeconds: 10, + FailureThreshold: 1, } wantProbeJSON, err := json.Marshal(expectedProbe) @@ -901,8 +906,9 @@ func TestProbeGenerationHTTP(t *testing.T) { }}, }, }, - PeriodSeconds: 2, - TimeoutSeconds: 10, + PeriodSeconds: 2, + TimeoutSeconds: 10, + FailureThreshold: 1, } }) @@ -935,6 +941,7 @@ func TestTCPProbeGeneration(t *testing.T) { }, PeriodSeconds: 0, SuccessThreshold: 3, + FailureThreshold: 1, }, rev: v1.RevisionSpec{ TimeoutSeconds: ptr.Int64(45), @@ -968,9 +975,10 @@ func TestTCPProbeGeneration(t *testing.T) { }}, }, }, - PeriodSeconds: 0, + 
PeriodSeconds: 1, TimeoutSeconds: 0, SuccessThreshold: 3, + FailureThreshold: 1, } c.Env = env(map[string]string{"USER_PORT": strconv.Itoa(userPort)}) }), @@ -997,8 +1005,9 @@ func TestTCPProbeGeneration(t *testing.T) { Port: intstr.FromInt(int(v1.DefaultUserPort)), }, }, - PeriodSeconds: 1, - TimeoutSeconds: 1, + PeriodSeconds: 1, + TimeoutSeconds: 1, + FailureThreshold: 1, }, want: queueContainer(func(c *corev1.Container) { c.ReadinessProbe = &corev1.Probe{ @@ -1013,7 +1022,8 @@ func TestTCPProbeGeneration(t *testing.T) { }, PeriodSeconds: 1, // Inherit Kubernetes default here rather than overriding as we need to do for exec probe. - TimeoutSeconds: 0, + TimeoutSeconds: 0, + FailureThreshold: 1, } c.Env = env(map[string]string{}) }), diff --git a/pkg/webhook/podspec_dryrun.go b/pkg/webhook/podspec_dryrun.go index 852efc101b61..8346a692c317 100644 --- a/pkg/webhook/podspec_dryrun.go +++ b/pkg/webhook/podspec_dryrun.go @@ -61,6 +61,16 @@ func validatePodSpec(ctx context.Context, ps v1.RevisionSpec, namespace string) rev.SetDefaults(ctx) podSpec := resources.BuildPodSpec(rev, resources.BuildUserContainers(rev), nil /*configs*/) + // Add the drain volume that BuildUserContainers adds volume mounts for + // This is necessary because BuildUserContainers adds the volume mount but + // the volume itself is only added in makePodSpec when cfg is not nil + podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ + Name: "knative-drain-signal", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }) + // Make a sample pod with the template Revisions & PodSpec and dryrun call to API-server pod := &corev1.Pod{ ObjectMeta: om, diff --git a/test/e2e/websocket_test.go b/test/e2e/websocket_test.go index ec7be61ac064..4ef55e52f446 100644 --- a/test/e2e/websocket_test.go +++ b/test/e2e/websocket_test.go @@ -322,6 +322,11 @@ func TestWebSocketWithTimeout(t *testing.T) { idleTimeoutSeconds: 10, delay: "20", expectError: true, + }, { + name: "websocket does not drop after queue drain is called at 30s", + timeoutSeconds: 60, + delay: "45", + expectError: false, }} for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { @@ -349,6 +354,45 @@ func TestWebSocketWithTimeout(t *testing.T) { } } +func TestWebSocketDrain(t *testing.T) { + clients := Setup(t) + + testCases := []struct { + name string + timeoutSeconds int64 + delay string + expectError bool + }{{ + name: "websocket does not drop after queue drain is called", + timeoutSeconds: 60, + delay: "45", + expectError: false, + }} + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + names := test.ResourceNames{ + Service: test.ObjectNameForTest(t), + Image: wsServerTestImageName, + } + + // Clean up in both abnormal and normal exits. + test.EnsureTearDown(t, clients, &names) + + _, err := v1test.CreateServiceReady(t, clients, &names, + rtesting.WithRevisionTimeoutSeconds(tc.timeoutSeconds), + ) + if err != nil { + t.Fatal("Failed to create WebSocket server:", err) + } + // Validate the websocket connection. 
+ err = ValidateWebSocketConnection(t, clients, names, tc.delay) + if (err == nil && tc.expectError) || (err != nil && !tc.expectError) { + t.Error(err) + } + }) + } +} + func abs(a int) int { if a < 0 { return -a diff --git a/test/prober.go b/test/prober.go index d1d254fa9400..e6d527b2bd48 100644 --- a/test/prober.go +++ b/test/prober.go @@ -201,7 +201,7 @@ func (m *manager) SLI() (total, failures int64) { total += pt failures += pf } - return + return total, failures } // Foreach implements ProberManager diff --git a/test/test_images/grpc-ping/proto/ping.pb.go b/test/test_images/grpc-ping/proto/ping.pb.go index f1cf30f9c35a..90b6f6b6b7e8 100644 --- a/test/test_images/grpc-ping/proto/ping.pb.go +++ b/test/test_images/grpc-ping/proto/ping.pb.go @@ -22,19 +22,22 @@ package ping import ( context "context" fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +var ( + _ = proto.Marshal + _ = fmt.Errorf + _ = math.Inf +) // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -52,9 +55,11 @@ func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor_baa7ad5f099fe3e5, []int{0} } + func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Request.Marshal(b, m, deterministic) @@ -67,12 +72,15 @@ func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } + func (m *Request) XXX_Merge(src proto.Message) { xxx_messageInfo_Request.Merge(m, src) } + func (m *Request) XXX_Size() int { return m.Size() } + func (m *Request) XXX_DiscardUnknown() { xxx_messageInfo_Request.DiscardUnknown(m) } @@ -96,9 +104,11 @@ func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor_baa7ad5f099fe3e5, []int{1} } + func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Response.Marshal(b, m, deterministic) @@ -111,12 +121,15 @@ func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } + func (m *Response) XXX_Merge(src proto.Message) { xxx_messageInfo_Response.Merge(m, src) } + func (m *Response) XXX_Size() int { return m.Size() } + func (m *Response) XXX_DiscardUnknown() { xxx_messageInfo_Response.DiscardUnknown(m) } @@ -156,8 +169,10 @@ var fileDescriptor_baa7ad5f099fe3e5 = []byte{ } // Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn +var ( + _ context.Context + _ grpc.ClientConn +) // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. @@ -226,12 +241,12 @@ type PingServiceServer interface { } // UnimplementedPingServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedPingServiceServer struct { -} +type UnimplementedPingServiceServer struct{} func (*UnimplementedPingServiceServer) Ping(ctx context.Context, req *Request) (*Response, error) { return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") } + func (*UnimplementedPingServiceServer) PingStream(srv PingService_PingStreamServer) error { return status.Errorf(codes.Unimplemented, "method PingStream not implemented") } @@ -375,6 +390,7 @@ func encodeVarintPing(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } + func (m *Request) Size() (n int) { if m == nil { return 0 @@ -404,9 +420,11 @@ func (m *Response) Size() (n int) { func sovPing(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } + func sozPing(x uint64) (n int) { return sovPing(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } + func (m *Request) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -489,6 +507,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } return nil } + func (m *Response) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -571,6 +590,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } return nil } + func skipPing(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 From 0750a4dcb5d55ad52b7a1628ffaee9fb4807aa80 Mon Sep 17 00:00:00 2001 From: Elijah Roussos Date: Fri, 12 Sep 2025 11:47:11 -0400 Subject: [PATCH 04/13] chore: update go.mod --- go.mod | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e9e0cb8f0b84..2cc07de02964 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( golang.org/x/sys v0.36.0 golang.org/x/time v0.10.0 google.golang.org/api v0.198.0 - google.golang.org/grpc v1.74.2 + google.golang.org/grpc v1.75.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.4 k8s.io/apiextensions-apiserver v0.33.4 @@ -153,8 +153,8 @@ require ( golang.org/x/text v0.29.0 // indirect golang.org/x/tools v0.37.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect google.golang.org/protobuf v1.36.8 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect From 962f69025bb50255e62aa8024d2bf16683ee1c49 Mon Sep 17 00:00:00 2001 From: Elijah Roussos Date: Fri, 12 Sep 2025 11:57:50 -0400 Subject: [PATCH 05/13] chore: update codegen --- vendor/knative.dev/pkg/controller/controller.go | 2 +- vendor/knative.dev/pkg/controller/queue_metrics.go | 8 +++----- vendor/knative.dev/pkg/controller/two_lane_queue.go | 4 ++-- vendor/knative.dev/pkg/hack/update-codegen.sh | 1 + vendor/knative.dev/pkg/hack/verify-codegen.sh | 5 +++++ vendor/knative.dev/pkg/network/transports.go | 3 +-- 6 files changed, 13 insertions(+), 10 deletions(-) diff --git a/vendor/knative.dev/pkg/controller/controller.go b/vendor/knative.dev/pkg/controller/controller.go index 6091f55fd9b4..7846399edf67 100644 --- a/vendor/knative.dev/pkg/controller/controller.go +++ b/vendor/knative.dev/pkg/controller/controller.go @@ -470,7 +470,7 @@ func (c *Impl) RunContext(ctx context.Context, threadiness int) error { } // Launch workers to process resources that get enqueued to our workqueue. 
- c.logger.Infow("Starting controller and workers", zap.Int("threadiness", threadiness)) + c.logger.Info("Starting controller and workers") for range threadiness { sg.Add(1) go func() { diff --git a/vendor/knative.dev/pkg/controller/queue_metrics.go b/vendor/knative.dev/pkg/controller/queue_metrics.go index 2e61e330ea51..dcd5889981ad 100644 --- a/vendor/knative.dev/pkg/controller/queue_metrics.go +++ b/vendor/knative.dev/pkg/controller/queue_metrics.go @@ -88,15 +88,13 @@ func (m *queueMetrics) get(item any) { m.mu.Lock() defer m.mu.Unlock() + m.depth.Dec() + m.processingStartTimes[item] = m.clock.Now() + if startTime, exists := m.addTimes[item]; exists { - m.depth.Dec() m.latency.Observe(m.sinceInSeconds(startTime)) delete(m.addTimes, item) } - - if _, exists := m.processingStartTimes[item]; !exists { - m.processingStartTimes[item] = m.clock.Now() - } } func (m *queueMetrics) done(item any) { diff --git a/vendor/knative.dev/pkg/controller/two_lane_queue.go b/vendor/knative.dev/pkg/controller/two_lane_queue.go index 0c1879ded805..294255131887 100644 --- a/vendor/knative.dev/pkg/controller/two_lane_queue.go +++ b/vendor/knative.dev/pkg/controller/two_lane_queue.go @@ -223,9 +223,9 @@ func (q *twoLaneRateLimitingQueue) slowLane() workqueue.TypedInterface[any] { // It gets the item from fast lane if it has anything, alternatively // the slow lane. func (tlq *twoLaneQueue) Get() (any, bool) { - item, shutdown := tlq.consumerQueue.Get() + item, ok := tlq.consumerQueue.Get() tlq.metrics.get(item) - return item, shutdown + return item, ok } // Len returns the sum of lengths. diff --git a/vendor/knative.dev/pkg/hack/update-codegen.sh b/vendor/knative.dev/pkg/hack/update-codegen.sh index 836c5ddadd76..7e3ad4abd447 100644 --- a/vendor/knative.dev/pkg/hack/update-codegen.sh +++ b/vendor/knative.dev/pkg/hack/update-codegen.sh @@ -73,6 +73,7 @@ go run k8s.io/code-generator/cmd/deepcopy-gen \ knative.dev/pkg/apis/duck/v1 \ knative.dev/pkg/tracker \ knative.dev/pkg/logging \ + knative.dev/pkg/metrics \ knative.dev/pkg/testing \ knative.dev/pkg/testing/duck \ knative.dev/pkg/webhook/resourcesemantics/conversion/internal diff --git a/vendor/knative.dev/pkg/hack/verify-codegen.sh b/vendor/knative.dev/pkg/hack/verify-codegen.sh index f5d36632501f..59fbeea852f3 100644 --- a/vendor/knative.dev/pkg/hack/verify-codegen.sh +++ b/vendor/knative.dev/pkg/hack/verify-codegen.sh @@ -37,6 +37,7 @@ cp -aR \ "${REPO_ROOT_DIR}/go.sum" \ "${REPO_ROOT_DIR}/apis" \ "${REPO_ROOT_DIR}/logging" \ + "${REPO_ROOT_DIR}/metrics" \ "${REPO_ROOT_DIR}/testing" \ "${REPO_ROOT_DIR}/vendor" \ "${TMP_DIFFROOT}" @@ -54,6 +55,9 @@ diff -Naupr --no-dereference \ diff -Naupr --no-dereference \ "${REPO_ROOT_DIR}/logging" "${TMP_DIFFROOT}/logging" || ret=1 +diff -Naupr --no-dereference \ + "${REPO_ROOT_DIR}/metrics" "${TMP_DIFFROOT}/metrics" || ret=1 + diff -Naupr --no-dereference \ "${REPO_ROOT_DIR}/testing" "${TMP_DIFFROOT}/testing" || ret=1 @@ -65,6 +69,7 @@ rm -fr \ "${REPO_ROOT_DIR}/go.sum" \ "${REPO_ROOT_DIR}/apis" \ "${REPO_ROOT_DIR}/logging" \ + "${REPO_ROOT_DIR}/metrics" \ "${REPO_ROOT_DIR}/testing" \ "${REPO_ROOT_DIR}/vendor" diff --git a/vendor/knative.dev/pkg/network/transports.go b/vendor/knative.dev/pkg/network/transports.go index ce8a72b4ebc2..1e9c6c219865 100644 --- a/vendor/knative.dev/pkg/network/transports.go +++ b/vendor/knative.dev/pkg/network/transports.go @@ -96,8 +96,7 @@ func dialBackOffHelper(ctx context.Context, network, address string, bo wait.Bac if tlsConf == nil { c, err = dialer.DialContext(ctx, network, address) 
} else { - d := tls.Dialer{NetDialer: dialer, Config: tlsConf} - c, err = d.DialContext(ctx, network, address) + c, err = tls.DialWithDialer(dialer, network, address, tlsConf) } if err != nil { var errNet net.Error From b1f26b807c3b8e9b403297484de08691e14b87f2 Mon Sep 17 00:00:00 2001 From: Elijah Roussos Date: Fri, 12 Sep 2025 12:40:13 -0400 Subject: [PATCH 06/13] test: improve testing suite for new drain logic --- .../sharedmain/handlers_integration_test.go | 259 ++++++++++++++++++ pkg/queue/sharedmain/handlers_test.go | 236 ++++++++++++++++ 2 files changed, 495 insertions(+) create mode 100644 pkg/queue/sharedmain/handlers_integration_test.go diff --git a/pkg/queue/sharedmain/handlers_integration_test.go b/pkg/queue/sharedmain/handlers_integration_test.go new file mode 100644 index 000000000000..fdf3ef0adc29 --- /dev/null +++ b/pkg/queue/sharedmain/handlers_integration_test.go @@ -0,0 +1,259 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sharedmain + +import ( + "net" + "net/http" + "net/http/httptest" + "os" + "sync" + "sync/atomic" + "testing" + "time" + + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/trace" + "go.uber.org/zap" + + netstats "knative.dev/networking/pkg/http/stats" + "knative.dev/pkg/network" + "knative.dev/serving/pkg/observability" +) + +func TestMainHandlerWithPendingRequests(t *testing.T) { + logger := zap.NewNop().Sugar() + tp := trace.NewTracerProvider() + mp := metric.NewMeterProvider() + + // Create a backend server to proxy to + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Simulate some processing time + time.Sleep(50 * time.Millisecond) + w.WriteHeader(http.StatusOK) + w.Write([]byte("backend response")) + })) + defer backend.Close() + + // Extract port from backend URL + _, port, _ := net.SplitHostPort(backend.Listener.Addr().String()) + + env := config{ + ContainerConcurrency: 10, + QueueServingPort: "8080", + QueueServingTLSPort: "8443", + UserPort: port, + RevisionTimeoutSeconds: 300, + ServingLoggingConfig: "", + ServingLoggingLevel: "info", + Observability: *observability.DefaultConfig(), + Env: Env{ + ServingNamespace: "test-namespace", + ServingConfiguration: "test-config", + ServingRevision: "test-revision", + ServingPod: "test-pod", + ServingPodIP: "10.0.0.1", + }, + } + + transport := buildTransport(env, tp, mp) + prober := func() bool { return true } + stats := netstats.NewRequestStats(time.Now()) + pendingRequests := atomic.Int32{} + + handler, drainer := mainHandler(env, transport, prober, stats, logger, mp, tp, &pendingRequests) + + t.Run("tracks pending requests correctly", func(t *testing.T) { + // Make a regular request + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.Host = "test.example.com" + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + w := httptest.NewRecorder() + handler.ServeHTTP(w, req) + }() + + // Give the request time to start + time.Sleep(10 * time.Millisecond) + + // Check 
that pending request counter was incremented + count := pendingRequests.Load() + if count != 1 { + t.Errorf("Expected 1 pending request, got %d", count) + } + + wg.Wait() + + // Check that counter was decremented after completion + if pendingRequests.Load() != 0 { + t.Errorf("Expected 0 pending requests after completion, got %d", pendingRequests.Load()) + } + }) + + t.Run("does not track probe requests", func(t *testing.T) { + // Make a probe request + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set(network.ProbeHeaderName, network.ProbeHeaderValue) + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + // Check that pending request counter was not incremented + if pendingRequests.Load() != 0 { + t.Errorf("Expected 0 pending requests for probe, got %d", pendingRequests.Load()) + } + }) + + t.Run("handles concurrent requests", func(t *testing.T) { + numRequests := 5 + var wg sync.WaitGroup + wg.Add(numRequests) + + for i := 0; i < numRequests; i++ { + go func(i int) { + defer wg.Done() + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.Host = "test.example.com" + w := httptest.NewRecorder() + handler.ServeHTTP(w, req) + }(i) + } + + // Give requests time to start + time.Sleep(20 * time.Millisecond) + + // Check that multiple requests are being tracked + count := pendingRequests.Load() + if count <= 0 || count > int32(numRequests) { + t.Errorf("Expected pending requests between 1 and %d, got %d", numRequests, count) + } + + wg.Wait() + + // Check that all requests completed + if pendingRequests.Load() != 0 { + t.Errorf("Expected 0 pending requests after all completed, got %d", pendingRequests.Load()) + } + }) + + t.Run("drainer integration", func(t *testing.T) { + // Start a long-running request + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.Host = "test.example.com" + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + w := httptest.NewRecorder() + handler.ServeHTTP(w, req) + }() + + // Give request time to start + time.Sleep(10 * time.Millisecond) + + // Verify request is being tracked + if pendingRequests.Load() != 1 { + t.Errorf("Expected 1 pending request before drain, got %d", pendingRequests.Load()) + } + + // Call drain + drainer.Drain() + + // Wait for request to complete + wg.Wait() + + // Verify counter is back to 0 + if pendingRequests.Load() != 0 { + t.Errorf("Expected 0 pending requests after drain, got %d", pendingRequests.Load()) + } + }) +} + +func TestBuildBreaker(t *testing.T) { + logger := zap.NewNop().Sugar() + + t.Run("returns nil for unlimited concurrency", func(t *testing.T) { + env := config{ + ContainerConcurrency: 0, + } + breaker := buildBreaker(logger, env) + if breaker != nil { + t.Error("Expected nil breaker for unlimited concurrency") + } + }) + + t.Run("creates breaker with correct params", func(t *testing.T) { + env := config{ + ContainerConcurrency: 10, + } + breaker := buildBreaker(logger, env) + if breaker == nil { + t.Fatal("Expected non-nil breaker") + } + // The breaker should be configured with QueueDepth = 10 * ContainerConcurrency + // and MaxConcurrency = ContainerConcurrency + }) +} + +func TestBuildProbe(t *testing.T) { + logger := zap.NewNop().Sugar() + + t.Run("creates probe without HTTP2 auto-detection", func(t *testing.T) { + encodedProbe := `{"httpGet":{"path":"/health","port":8080}}` + probe := buildProbe(logger, encodedProbe, false, false) + if probe == nil { + t.Fatal("Expected non-nil probe") + } + }) + + t.Run("creates probe with HTTP2 auto-detection", 
func(t *testing.T) { + encodedProbe := `{"httpGet":{"path":"/health","port":8080}}` + probe := buildProbe(logger, encodedProbe, true, false) + if probe == nil { + t.Fatal("Expected non-nil probe") + } + }) +} + +func TestExists(t *testing.T) { + logger := zap.NewNop().Sugar() + + t.Run("returns true for existing file", func(t *testing.T) { + // Create a temporary file + tmpfile, err := os.CreateTemp("", "test") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpfile.Name()) + + if !exists(logger, tmpfile.Name()) { + t.Error("Expected true for existing file") + } + }) + + t.Run("returns false for non-existent file", func(t *testing.T) { + if exists(logger, "/non/existent/file/path") { + t.Error("Expected false for non-existent file") + } + }) +} \ No newline at end of file diff --git a/pkg/queue/sharedmain/handlers_test.go b/pkg/queue/sharedmain/handlers_test.go index 64d0b853ee9b..2dd3141aa52c 100644 --- a/pkg/queue/sharedmain/handlers_test.go +++ b/pkg/queue/sharedmain/handlers_test.go @@ -20,11 +20,15 @@ import ( "context" "net/http" "net/http/httptest" + "sync" "sync/atomic" "testing" + "time" "go.uber.org/zap" + "knative.dev/pkg/network" pkghandler "knative.dev/pkg/network/handlers" + "knative.dev/serving/pkg/queue" ) func TestDrainCompleteEndpoint(t *testing.T) { @@ -69,3 +73,235 @@ func TestDrainCompleteEndpoint(t *testing.T) { } }) } + +func TestRequestQueueDrainHandler(t *testing.T) { + logger := zap.NewNop().Sugar() + + t.Run("handles drain request when context is done", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + drainer := &pkghandler.Drainer{ + QuietPeriod: 100 * time.Millisecond, + Inner: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }), + } + pendingRequests := atomic.Int32{} + + handler := adminHandler(ctx, logger, drainer, &pendingRequests) + + // Cancel context to simulate TERM signal + cancel() + + req := httptest.NewRequest(http.MethodPost, queue.RequestQueueDrainPath, nil) + w := httptest.NewRecorder() + + // This should call drainer.Drain() and return immediately + handler.ServeHTTP(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status 200, got %d", w.Code) + } + + // Verify the drainer is in draining state by sending a probe + probeReq := httptest.NewRequest(http.MethodGet, "/", nil) + probeReq.Header.Set("User-Agent", "kube-probe/1.0") + probeW := httptest.NewRecorder() + drainer.ServeHTTP(probeW, probeReq) + + // Should return 503 because drainer is draining + if probeW.Code != http.StatusServiceUnavailable { + t.Errorf("Expected probe to return 503 during drain, got %d", probeW.Code) + } + }) + + t.Run("resets drainer after timeout when context not done", func(t *testing.T) { + ctx := context.Background() + drainer := &pkghandler.Drainer{ + QuietPeriod: 100 * time.Millisecond, + Inner: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }), + } + pendingRequests := atomic.Int32{} + + handler := adminHandler(ctx, logger, drainer, &pendingRequests) + + req := httptest.NewRequest(http.MethodPost, queue.RequestQueueDrainPath, nil) + w := httptest.NewRecorder() + + // Start the drain in a goroutine since Drain() blocks + done := make(chan bool) + go func() { + handler.ServeHTTP(w, req) + done <- true + }() + + // Give it time to start draining + time.Sleep(50 * time.Millisecond) + + // Check that drainer is draining + probeReq := httptest.NewRequest(http.MethodGet, "/", nil) + probeReq.Header.Set("User-Agent", 
"kube-probe/1.0") + probeW := httptest.NewRecorder() + drainer.ServeHTTP(probeW, probeReq) + + if probeW.Code != http.StatusServiceUnavailable { + t.Errorf("Expected probe to return 503 during drain, got %d", probeW.Code) + } + + // Wait for the reset to happen (after 1 second) + time.Sleep(1100 * time.Millisecond) + + // Check that drainer has been reset and is no longer draining + probeReq2 := httptest.NewRequest(http.MethodGet, "/", nil) + probeReq2.Header.Set("User-Agent", "kube-probe/1.0") + probeW2 := httptest.NewRecorder() + drainer.ServeHTTP(probeW2, probeReq2) + + // Should return 200 because drainer was reset + if probeW2.Code != http.StatusOK { + t.Errorf("Expected probe to return 200 after reset, got %d", probeW2.Code) + } + + // Clean up + select { + case <-done: + case <-time.After(2 * time.Second): + t.Error("Handler did not complete in time") + } + }) +} + +func TestWithRequestCounter(t *testing.T) { + pendingRequests := atomic.Int32{} + + // Create a test handler that we'll wrap + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Sleep briefly to ensure counter is incremented + time.Sleep(10 * time.Millisecond) + w.WriteHeader(http.StatusOK) + }) + + wrappedHandler := withRequestCounter(baseHandler, &pendingRequests) + + t.Run("increments counter for regular requests", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/", nil) + w := httptest.NewRecorder() + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + wrappedHandler.ServeHTTP(w, req) + }() + + // Give the request time to start + time.Sleep(5 * time.Millisecond) + + // Check that counter was incremented + if pendingRequests.Load() != 1 { + t.Errorf("Expected pending requests to be 1, got %d", pendingRequests.Load()) + } + + wg.Wait() + + // Check that counter was decremented after request completed + if pendingRequests.Load() != 0 { + t.Errorf("Expected pending requests to be 0 after completion, got %d", pendingRequests.Load()) + } + }) + + t.Run("skips counter for probe requests", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set(network.ProbeHeaderName, network.ProbeHeaderValue) + w := httptest.NewRecorder() + + wrappedHandler.ServeHTTP(w, req) + + // Check that counter was not incremented + if pendingRequests.Load() != 0 { + t.Errorf("Expected pending requests to remain 0 for probe, got %d", pendingRequests.Load()) + } + }) + + t.Run("skips counter for kube-probe requests", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set("User-Agent", "kube-probe/1.27") + w := httptest.NewRecorder() + + wrappedHandler.ServeHTTP(w, req) + + // Check that counter was not incremented + if pendingRequests.Load() != 0 { + t.Errorf("Expected pending requests to remain 0 for kube-probe, got %d", pendingRequests.Load()) + } + }) + + t.Run("handles concurrent requests correctly", func(t *testing.T) { + // Reset counter + pendingRequests.Store(0) + + numRequests := 10 + var wg sync.WaitGroup + wg.Add(numRequests) + + for i := 0; i < numRequests; i++ { + go func() { + defer wg.Done() + req := httptest.NewRequest(http.MethodGet, "/", nil) + w := httptest.NewRecorder() + wrappedHandler.ServeHTTP(w, req) + }() + } + + // Give requests time to start + time.Sleep(5 * time.Millisecond) + + // Check that all requests are being tracked + count := pendingRequests.Load() + if count <= 0 || count > int32(numRequests) { + t.Errorf("Expected pending requests to be between 1 and %d, got %d", 
numRequests, count) + } + + wg.Wait() + + // Check that counter returned to 0 + if pendingRequests.Load() != 0 { + t.Errorf("Expected pending requests to be 0 after all completed, got %d", pendingRequests.Load()) + } + }) +} + +func TestWithFullDuplex(t *testing.T) { + logger := zap.NewNop().Sugar() + + baseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + t.Run("passes through when disabled", func(t *testing.T) { + wrappedHandler := withFullDuplex(baseHandler, false, logger) + + req := httptest.NewRequest(http.MethodGet, "/", nil) + w := httptest.NewRecorder() + + wrappedHandler.ServeHTTP(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status 200, got %d", w.Code) + } + }) + + t.Run("enables full duplex when configured", func(t *testing.T) { + wrappedHandler := withFullDuplex(baseHandler, true, logger) + + req := httptest.NewRequest(http.MethodGet, "/", nil) + w := httptest.NewRecorder() + + wrappedHandler.ServeHTTP(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status 200, got %d", w.Code) + } + }) +} From f6dd4b96f4e548b551bf839a8269265d2ef75c2d Mon Sep 17 00:00:00 2001 From: Elijah Roussos Date: Fri, 12 Sep 2025 13:19:43 -0400 Subject: [PATCH 07/13] chore: run goimports --- pkg/queue/sharedmain/handlers_integration_test.go | 4 ++-- pkg/queue/sharedmain/handlers_test.go | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/queue/sharedmain/handlers_integration_test.go b/pkg/queue/sharedmain/handlers_integration_test.go index fdf3ef0adc29..aba6a5cadd32 100644 --- a/pkg/queue/sharedmain/handlers_integration_test.go +++ b/pkg/queue/sharedmain/handlers_integration_test.go @@ -127,7 +127,7 @@ func TestMainHandlerWithPendingRequests(t *testing.T) { var wg sync.WaitGroup wg.Add(numRequests) - for i := 0; i < numRequests; i++ { + for i := range numRequests { go func(i int) { defer wg.Done() req := httptest.NewRequest(http.MethodGet, "/test", nil) @@ -256,4 +256,4 @@ func TestExists(t *testing.T) { t.Error("Expected false for non-existent file") } }) -} \ No newline at end of file +} diff --git a/pkg/queue/sharedmain/handlers_test.go b/pkg/queue/sharedmain/handlers_test.go index 2dd3141aa52c..67ca8969a7eb 100644 --- a/pkg/queue/sharedmain/handlers_test.go +++ b/pkg/queue/sharedmain/handlers_test.go @@ -101,13 +101,13 @@ func TestRequestQueueDrainHandler(t *testing.T) { if w.Code != http.StatusOK { t.Errorf("Expected status 200, got %d", w.Code) } - + // Verify the drainer is in draining state by sending a probe probeReq := httptest.NewRequest(http.MethodGet, "/", nil) probeReq.Header.Set("User-Agent", "kube-probe/1.0") probeW := httptest.NewRecorder() drainer.ServeHTTP(probeW, probeReq) - + // Should return 503 because drainer is draining if probeW.Code != http.StatusServiceUnavailable { t.Errorf("Expected probe to return 503 during drain, got %d", probeW.Code) @@ -144,7 +144,7 @@ func TestRequestQueueDrainHandler(t *testing.T) { probeReq.Header.Set("User-Agent", "kube-probe/1.0") probeW := httptest.NewRecorder() drainer.ServeHTTP(probeW, probeReq) - + if probeW.Code != http.StatusServiceUnavailable { t.Errorf("Expected probe to return 503 during drain, got %d", probeW.Code) } @@ -157,7 +157,7 @@ func TestRequestQueueDrainHandler(t *testing.T) { probeReq2.Header.Set("User-Agent", "kube-probe/1.0") probeW2 := httptest.NewRecorder() drainer.ServeHTTP(probeW2, probeReq2) - + // Should return 200 because drainer was reset if probeW2.Code != http.StatusOK { t.Errorf("Expected 
probe to return 200 after reset, got %d", probeW2.Code) @@ -245,7 +245,7 @@ func TestWithRequestCounter(t *testing.T) { var wg sync.WaitGroup wg.Add(numRequests) - for i := 0; i < numRequests; i++ { + for range numRequests { go func() { defer wg.Done() req := httptest.NewRequest(http.MethodGet, "/", nil) From 7a48a83894bf8c4a0643b940a6bee1023f573309 Mon Sep 17 00:00:00 2001 From: Elijah Roussos Date: Thu, 25 Sep 2025 10:48:29 -0400 Subject: [PATCH 08/13] fix: prevent PreStop deadlock with two-stage drain signal mechanism The previous implementation had a circular dependency where: - User container PreStop waited for drain-complete file - Queue-proxy only wrote drain-complete after receiving SIGTERM - But SIGTERM was blocked waiting for PreStop to finish This fix implements a two-stage drain signal: 1. Queue-proxy PreStop writes drain-started immediately on pod deletion 2. User container PreStop waits for drain-started (with 3s timeout for safety) 3. Queue-proxy SIGTERM handler drains requests and writes drain-complete 4. User container waits for drain-complete before allowing termination This ensures proper shutdown sequencing without deadlock while still delaying user container termination until queue-proxy has drained. Also includes cleanup of stale drain signal files on queue-proxy startup. feat: improve PreStop drain coordination with exponential backoff - Replace fixed 3-second wait with exponential backoff (1, 2, 4, 8 seconds) - Change drain-complete check interval from 0.1s to 1s to reduce CPU usage - Exit gracefully if drain-started is never detected after retries - More robust handling of queue-proxy failures or slow PreStop execution This provides better resilience against timing issues while reducing unnecessary CPU usage during the wait loop. test: add comprehensive integration tests for shutdown coordination Add integration tests to verify the PreStop shutdown coordination works correctly in various scenarios: - Normal shutdown sequence with proper signal ordering - Queue-proxy crash/failure scenarios - High load conditions with many pending requests - File system permission issues - Race condition testing with 50 iterations - Long-running requests that exceed typical drain timeout These tests ensure the exponential backoff and two-stage drain signal mechanism handles edge cases gracefully. Run with: go test -tags=integration -race ./pkg/queue/sharedmain refactor: extract drain signal paths and logic to shared constants Based on PR review feedback, centralize drain signal configuration: - Create pkg/queue/drain/signals.go with all drain-related constants - Define signal file paths (DrainStartedFile, DrainCompleteFile) - Extract shell script logic into BuildDrainWaitScript() function - Define exponential backoff delays and check intervals as constants - Update all references to use the new constants package This improves code maintainability and makes it easier to modify drain behavior in the future. All file paths and timing parameters are now defined in a single location. 
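For illustration, the user-container PreStop wait that BuildDrainWaitScript("") generates (see pkg/queue/drain/signals.go below) is equivalent to this shell sketch; the file paths, check interval, and backoff delays come straight from the constants defined in that file:

  # Poll for drain-started with exponential backoff (at most 1+2+4+8 = 15s).
  for i in 1 2 4 8; do
    if [ -f /var/run/knative/drain-started ]; then
      # Queue-proxy has begun shutting down; wait until it finishes draining.
      until [ -f /var/run/knative/drain-complete ]; do sleep 1; done
      exit 0
    fi
    sleep $i
  done
  # drain-started never appeared (e.g. queue-proxy crashed); exit 0 anyway
  # so the user container is not blocked from terminating.
  exit 0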
--- pkg/queue/drain/signals.go | 56 +++ pkg/queue/drain/signals_test.go | 242 +++++++++++ pkg/queue/sharedmain/main.go | 16 +- .../sharedmain/shutdown_integration_test.go | 403 ++++++++++++++++++ pkg/reconciler/revision/resources/deploy.go | 5 +- .../resources/deploy_lifecycle_test.go | 10 +- .../revision/resources/deploy_test.go | 10 + pkg/reconciler/revision/resources/queue.go | 11 + 8 files changed, 741 insertions(+), 12 deletions(-) create mode 100644 pkg/queue/drain/signals.go create mode 100644 pkg/queue/drain/signals_test.go create mode 100644 pkg/queue/sharedmain/shutdown_integration_test.go diff --git a/pkg/queue/drain/signals.go b/pkg/queue/drain/signals.go new file mode 100644 index 000000000000..cdcc78708484 --- /dev/null +++ b/pkg/queue/drain/signals.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package drain + +const ( + // SignalDirectory is the directory where drain signal files are created + SignalDirectory = "/var/run/knative" + + // DrainStartedFile indicates that pod termination has begun and queue-proxy is handling shutdown + DrainStartedFile = SignalDirectory + "/drain-started" + + // DrainCompleteFile indicates that queue-proxy has finished draining requests + DrainCompleteFile = SignalDirectory + "/drain-complete" + + // DrainCheckInterval is how often to check for drain completion + DrainCheckInterval = "1" // seconds + + // ExponentialBackoffDelays are the delays in seconds for checking drain-started file + // Total max wait time: 1+2+4+8 = 15 seconds + ExponentialBackoffDelays = "1 2 4 8" +) + +// BuildDrainWaitScript generates the shell script for waiting on drain signals. +// If existingCommand is provided, it will be executed before the drain wait. +func BuildDrainWaitScript(existingCommand string) string { + drainLogic := `for i in ` + ExponentialBackoffDelays + `; do ` + + ` if [ -f ` + DrainStartedFile + ` ]; then ` + + ` until [ -f ` + DrainCompleteFile + ` ]; do sleep ` + DrainCheckInterval + `; done; ` + + ` exit 0; ` + + ` fi; ` + + ` sleep $i; ` + + `done; ` + + `exit 0` + + if existingCommand != "" { + return existingCommand + "; " + drainLogic + } + return drainLogic +} + +// QueueProxyPreStopScript is the script executed by queue-proxy's PreStop hook +const QueueProxyPreStopScript = "touch " + DrainStartedFile diff --git a/pkg/queue/drain/signals_test.go b/pkg/queue/drain/signals_test.go new file mode 100644 index 000000000000..7e4bc52cf79a --- /dev/null +++ b/pkg/queue/drain/signals_test.go @@ -0,0 +1,242 @@ +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package drain + +import ( + "strings" + "testing" +) + +func TestConstants(t *testing.T) { + tests := []struct { + name string + got string + expected string + }{ + { + name: "SignalDirectory", + got: SignalDirectory, + expected: "/var/run/knative", + }, + { + name: "DrainStartedFile", + got: DrainStartedFile, + expected: "/var/run/knative/drain-started", + }, + { + name: "DrainCompleteFile", + got: DrainCompleteFile, + expected: "/var/run/knative/drain-complete", + }, + { + name: "DrainCheckInterval", + got: DrainCheckInterval, + expected: "1", + }, + { + name: "ExponentialBackoffDelays", + got: ExponentialBackoffDelays, + expected: "1 2 4 8", + }, + { + name: "QueueProxyPreStopScript", + got: QueueProxyPreStopScript, + expected: "touch /var/run/knative/drain-started", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.got != tt.expected { + t.Errorf("got %q, want %q", tt.got, tt.expected) + } + }) + } +} + +func TestBuildDrainWaitScript(t *testing.T) { + tests := []struct { + name string + existingCommand string + wantContains []string + wantExact bool + }{ + { + name: "without existing command", + existingCommand: "", + wantContains: []string{ + "for i in 1 2 4 8", + "if [ -f /var/run/knative/drain-started ]", + "until [ -f /var/run/knative/drain-complete ]", + "sleep 1", + "sleep $i", + "exit 0", + }, + wantExact: false, + }, + { + name: "with existing command", + existingCommand: "echo 'custom prestop'", + wantContains: []string{ + "echo 'custom prestop'", + "for i in 1 2 4 8", + "if [ -f /var/run/knative/drain-started ]", + "until [ -f /var/run/knative/drain-complete ]", + "sleep 1", + "sleep $i", + "exit 0", + }, + wantExact: false, + }, + { + name: "with complex existing command", + existingCommand: "/bin/sh -c 'kill -TERM 1 && wait'", + wantContains: []string{ + "/bin/sh -c 'kill -TERM 1 && wait'", + "for i in 1 2 4 8", + "if [ -f /var/run/knative/drain-started ]", + "until [ -f /var/run/knative/drain-complete ]", + }, + wantExact: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := BuildDrainWaitScript(tt.existingCommand) + + for _, want := range tt.wantContains { + if !strings.Contains(got, want) { + t.Errorf("BuildDrainWaitScript() missing expected substring %q\nGot: %q", want, got) + } + } + + // Verify the command structure + if tt.existingCommand != "" { + // Should start with the existing command + if !strings.HasPrefix(got, tt.existingCommand+"; ") { + t.Errorf("BuildDrainWaitScript() should start with existing command followed by '; '\nGot: %q", got) + } + } + + // Verify the script ends with exit 0 + if !strings.HasSuffix(got, "exit 0") { + t.Errorf("BuildDrainWaitScript() should end with 'exit 0'\nGot: %q", got) + } + }) + } +} + +func TestBuildDrainWaitScriptStructure(t *testing.T) { + // Test the exact structure of the generated script without existing command + got := BuildDrainWaitScript("") + expected := "for i in 1 2 4 8; do " + + " if [ -f /var/run/knative/drain-started ]; then " + + " until [ -f /var/run/knative/drain-complete ]; do sleep 1; done; " + + " exit 0; " + + " fi; " + + " sleep $i; " + + "done; " + + "exit 0" + + if got != expected { + t.Errorf("BuildDrainWaitScript(\"\") structure mismatch\nGot: %q\nExpected: %q", got, expected) + } +} + +func TestBuildDrainWaitScriptWithCommandStructure(t *testing.T) { + // Test the exact structure of the generated script with 
existing command + existingCmd := "echo 'test'" + got := BuildDrainWaitScript(existingCmd) + expected := "echo 'test'; for i in 1 2 4 8; do " + + " if [ -f /var/run/knative/drain-started ]; then " + + " until [ -f /var/run/knative/drain-complete ]; do sleep 1; done; " + + " exit 0; " + + " fi; " + + " sleep $i; " + + "done; " + + "exit 0" + + if got != expected { + t.Errorf("BuildDrainWaitScript with command structure mismatch\nGot: %q\nExpected: %q", got, expected) + } +} + +func TestBuildDrainWaitScriptEdgeCases(t *testing.T) { + tests := []struct { + name string + existingCommand string + checkFunc func(t *testing.T, result string) + }{ + { + name: "empty string produces valid script", + existingCommand: "", + checkFunc: func(t *testing.T, result string) { + if result == "" { + t.Error("BuildDrainWaitScript(\"\") should not return empty string") + } + if !strings.Contains(result, "for i in") { + t.Error("BuildDrainWaitScript(\"\") should contain for loop") + } + }, + }, + { + name: "command with semicolon", + existingCommand: "cmd1; cmd2", + checkFunc: func(t *testing.T, result string) { + if !strings.HasPrefix(result, "cmd1; cmd2; ") { + t.Error("BuildDrainWaitScript should preserve command with semicolons") + } + }, + }, + { + name: "command with special characters", + existingCommand: "echo '$VAR' && test -f /tmp/file", + checkFunc: func(t *testing.T, result string) { + if !strings.HasPrefix(result, "echo '$VAR' && test -f /tmp/file; ") { + t.Error("BuildDrainWaitScript should preserve special characters") + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := BuildDrainWaitScript(tt.existingCommand) + tt.checkFunc(t, result) + }) + } +} + +func BenchmarkBuildDrainWaitScript(b *testing.B) { + testCases := []struct { + name string + command string + }{ + {"NoCommand", ""}, + {"SimpleCommand", "echo test"}, + {"ComplexCommand", "/bin/sh -c 'kill -TERM 1 && wait'"}, + } + + for _, tc := range testCases { + b.Run(tc.name, func(b *testing.B) { + for range b.N { + _ = BuildDrainWaitScript(tc.command) + } + }) + } +} diff --git a/pkg/queue/sharedmain/main.go b/pkg/queue/sharedmain/main.go index af5d613c24bc..37d28b2b0f4b 100644 --- a/pkg/queue/sharedmain/main.go +++ b/pkg/queue/sharedmain/main.go @@ -49,6 +49,7 @@ import ( "knative.dev/serving/pkg/observability" "knative.dev/serving/pkg/queue" "knative.dev/serving/pkg/queue/certificate" + "knative.dev/serving/pkg/queue/drain" "knative.dev/serving/pkg/queue/readiness" ) @@ -273,8 +274,9 @@ func Main(opts ...Option) error { logger.Info("Starting queue-proxy") - // Clean up any stale drain signal file from previous runs - os.Remove("/var/run/knative/drain-complete") + // Clean up any stale drain signal files from previous runs + os.Remove(drain.DrainStartedFile) + os.Remove(drain.DrainCompleteFile) errCh := make(chan error) for name, server := range httpServers { @@ -322,14 +324,16 @@ func Main(opts ...Option) error { for range ticker.C { if pendingRequests.Load() <= 0 { logger.Infof("Drain: all pending requests completed") - // Write drain signal file for PreStop hooks to detect - if err := os.WriteFile("/var/run/knative/drain-complete", []byte(""), 0o600); err != nil { - logger.Errorw("Failed to write drain signal file", zap.Error(err)) - } break WaitOnPendingRequests } } + // Write drain-complete signal file after draining is done + // This signals to user containers that queue-proxy has finished draining + if err := os.WriteFile(drain.DrainCompleteFile, []byte(""), 0o600); err != nil { + 
logger.Errorw("Failed to write drain-complete signal file", zap.Error(err)) + } + for name, srv := range httpServers { logger.Info("Shutting down server: ", name) if err := srv.Shutdown(context.Background()); err != nil { diff --git a/pkg/queue/sharedmain/shutdown_integration_test.go b/pkg/queue/sharedmain/shutdown_integration_test.go new file mode 100644 index 000000000000..5c4bfe0e7444 --- /dev/null +++ b/pkg/queue/sharedmain/shutdown_integration_test.go @@ -0,0 +1,403 @@ +//go:build integration +// +build integration + +/* +Copyright 2024 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sharedmain + +import ( + "os" + "path/filepath" + "sync" + "sync/atomic" + "testing" + "time" + + "go.uber.org/zap/zaptest" +) + +// TestShutdownCoordination_NormalSequence tests the normal shutdown sequence +// where queue-proxy writes drain signals and user container waits appropriately +func TestShutdownCoordination_NormalSequence(t *testing.T) { + tmpDir := t.TempDir() + drainDir := filepath.Join(tmpDir, "knative") + if err := os.MkdirAll(drainDir, 0755); err != nil { + t.Fatal(err) + } + + drainStarted := filepath.Join(drainDir, "drain-started") + drainComplete := filepath.Join(drainDir, "drain-complete") + + // Simulate queue-proxy PreStop + queueProxyPreStop := func() { + if err := os.WriteFile(drainStarted, []byte(""), 0600); err != nil { + t.Errorf("Failed to write drain-started: %v", err) + } + } + + // Simulate user container PreStop with exponential backoff + userPreStopStarted := make(chan struct{}) + userPreStopCompleted := make(chan struct{}) + + go func() { + close(userPreStopStarted) + // Simulate the actual PreStop script logic + for _, delay := range []int{1, 2, 4, 8} { + if _, err := os.Stat(drainStarted); err == nil { + // drain-started exists, wait for drain-complete + for i := 0; i < 30; i++ { // Max 30 seconds wait + if _, err := os.Stat(drainComplete); err == nil { + close(userPreStopCompleted) + return + } + time.Sleep(1 * time.Second) + } + } + time.Sleep(time.Duration(delay) * time.Second) + } + // Exit after retries + close(userPreStopCompleted) + }() + + // Wait for user PreStop to start + <-userPreStopStarted + + // Simulate some delay before queue-proxy writes the file + time.Sleep(500 * time.Millisecond) + + // Execute queue-proxy PreStop + queueProxyPreStop() + + // Simulate queue-proxy draining and writing complete signal + time.Sleep(2 * time.Second) + if err := os.WriteFile(drainComplete, []byte(""), 0600); err != nil { + t.Fatal(err) + } + + // Wait for user PreStop to complete + select { + case <-userPreStopCompleted: + // Success + case <-time.After(20 * time.Second): + t.Fatal("User PreStop did not complete in time") + } + + // Verify both files exist + if _, err := os.Stat(drainStarted); os.IsNotExist(err) { + t.Error("drain-started file was not created") + } + if _, err := os.Stat(drainComplete); os.IsNotExist(err) { + t.Error("drain-complete file was not created") + } +} + +// TestShutdownCoordination_QueueProxyCrash tests behavior when queue-proxy 
+// crashes or fails to write the drain-started signal +func TestShutdownCoordination_QueueProxyCrash(t *testing.T) { + tmpDir := t.TempDir() + drainDir := filepath.Join(tmpDir, "knative") + if err := os.MkdirAll(drainDir, 0755); err != nil { + t.Fatal(err) + } + + drainStarted := filepath.Join(drainDir, "drain-started") + + // Simulate user container PreStop without queue-proxy creating the file + userPreStopCompleted := make(chan struct{}) + startTime := time.Now() + + go func() { + // Simulate the actual PreStop script logic + for _, delay := range []int{1, 2, 4, 8} { + if _, err := os.Stat(drainStarted); err == nil { + t.Error("drain-started should not exist in crash scenario") + } + time.Sleep(time.Duration(delay) * time.Second) + } + // Should exit after retries + close(userPreStopCompleted) + }() + + // Wait for user PreStop to complete + select { + case <-userPreStopCompleted: + elapsed := time.Since(startTime) + // Should complete after 1+2+4+8 = 15 seconds + if elapsed < 14*time.Second || elapsed > 16*time.Second { + t.Errorf("PreStop took %v, expected ~15s", elapsed) + } + case <-time.After(20 * time.Second): + t.Fatal("User PreStop did not complete after retries") + } +} + +// TestShutdownCoordination_HighLoad tests shutdown under high request load +func TestShutdownCoordination_HighLoad(t *testing.T) { + tmpDir := t.TempDir() + drainDir := filepath.Join(tmpDir, "knative") + if err := os.MkdirAll(drainDir, 0755); err != nil { + t.Fatal(err) + } + + drainStarted := filepath.Join(drainDir, "drain-started") + drainComplete := filepath.Join(drainDir, "drain-complete") + + // Simulate active requests + var pendingRequests int32 = 100 + var wg sync.WaitGroup + + // Simulate queue-proxy handling requests + for i := 0; i < 100; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + // Simulate request processing + time.Sleep(time.Duration(100+id*10) * time.Millisecond) + atomic.AddInt32(&pendingRequests, -1) + }(i) + } + + // Start shutdown sequence + go func() { + // Write drain-started immediately + if err := os.WriteFile(drainStarted, []byte(""), 0600); err != nil { + t.Errorf("Failed to write drain-started: %v", err) + } + + // Wait for all requests to complete + for atomic.LoadInt32(&pendingRequests) > 0 { + time.Sleep(100 * time.Millisecond) + } + + // Write drain-complete after all requests are done + if err := os.WriteFile(drainComplete, []byte(""), 0600); err != nil { + t.Errorf("Failed to write drain-complete: %v", err) + } + }() + + // Simulate user container waiting + userPreStopCompleted := make(chan struct{}) + go func() { + for _, delay := range []int{1, 2, 4, 8} { + if _, err := os.Stat(drainStarted); err == nil { + // Wait for drain-complete + for i := 0; i < 30; i++ { + if _, err := os.Stat(drainComplete); err == nil { + close(userPreStopCompleted) + return + } + time.Sleep(1 * time.Second) + } + } + time.Sleep(time.Duration(delay) * time.Second) + } + t.Error("User PreStop exited without seeing drain-complete") + }() + + // Wait for all requests to complete + wg.Wait() + + // Ensure user PreStop completes + select { + case <-userPreStopCompleted: + // Verify no requests remain + if atomic.LoadInt32(&pendingRequests) != 0 { + t.Errorf("Requests remaining: %d", pendingRequests) + } + case <-time.After(30 * time.Second): + t.Fatal("User PreStop did not complete under load") + } +} + +// TestShutdownCoordination_FilePermissions tests behavior with file system issues +func TestShutdownCoordination_FilePermissions(t *testing.T) { + // Skip if running as root (can't
test permission issues properly) + if os.Geteuid() == 0 { + t.Skip("Cannot test permission issues as root") + } + + tmpDir := t.TempDir() + drainDir := filepath.Join(tmpDir, "knative") + if err := os.MkdirAll(drainDir, 0755); err != nil { + t.Fatal(err) + } + + // Make directory read-only + if err := os.Chmod(drainDir, 0555); err != nil { + t.Fatal(err) + } + defer os.Chmod(drainDir, 0755) // Restore for cleanup + + drainStarted := filepath.Join(drainDir, "drain-started") + + // Try to write drain-started (should fail) + err := os.WriteFile(drainStarted, []byte(""), 0600) + if err == nil { + t.Error("Expected write to fail with read-only directory") + } + + // User PreStop should still complete after retries + userPreStopCompleted := make(chan struct{}) + go func() { + for _, delay := range []int{1, 2, 4, 8} { + if _, err := os.Stat(drainStarted); err == nil { + t.Error("File should not exist with permission issues") + } + time.Sleep(time.Duration(delay) * time.Millisecond) // Use ms for faster test + } + close(userPreStopCompleted) + }() + + select { + case <-userPreStopCompleted: + // Success - PreStop completed despite permission issues + case <-time.After(5 * time.Second): + t.Fatal("User PreStop did not complete with permission issues") + } +} + +// TestShutdownCoordination_RaceCondition tests for race conditions +// between queue-proxy and user container PreStop hooks +func TestShutdownCoordination_RaceCondition(t *testing.T) { + tmpDir := t.TempDir() + drainDir := filepath.Join(tmpDir, "knative") + + // Run multiple iterations to catch race conditions + for i := 0; i < 50; i++ { + // Clean up from previous iteration + os.RemoveAll(drainDir) + if err := os.MkdirAll(drainDir, 0755); err != nil { + t.Fatal(err) + } + + drainStarted := filepath.Join(drainDir, "drain-started") + drainComplete := filepath.Join(drainDir, "drain-complete") + + var wg sync.WaitGroup + wg.Add(2) + + // Queue-proxy PreStop and shutdown + go func() { + defer wg.Done() + // Random delay to create race conditions + time.Sleep(time.Duration(i%10) * time.Millisecond) + os.WriteFile(drainStarted, []byte(""), 0600) + time.Sleep(time.Duration(i%5) * time.Millisecond) + os.WriteFile(drainComplete, []byte(""), 0600) + }() + + // User container PreStop + completed := make(chan bool, 1) + go func() { + defer wg.Done() + timeout := time.After(20 * time.Second) + for _, delay := range []int{1, 2, 4, 8} { + select { + case <-timeout: + completed <- false + return + default: + } + + if _, err := os.Stat(drainStarted); err == nil { + // Wait for complete + for j := 0; j < 30; j++ { + if _, err := os.Stat(drainComplete); err == nil { + completed <- true + return + } + time.Sleep(100 * time.Millisecond) + } + } + time.Sleep(time.Duration(delay) * time.Millisecond) + } + completed <- true // Exit after retries + }() + + // Wait for both to complete + wg.Wait() + + if !<-completed { + t.Errorf("Iteration %d: User PreStop timed out", i) + } + } +} + +// TestShutdownCoordination_LongRunningRequests tests behavior with +// requests that take longer than the grace period +func TestShutdownCoordination_LongRunningRequests(t *testing.T) { + logger := zaptest.NewLogger(t).Sugar() + + tmpDir := t.TempDir() + drainDir := filepath.Join(tmpDir, "knative") + if err := os.MkdirAll(drainDir, 0755); err != nil { + t.Fatal(err) + } + + drainStarted := filepath.Join(drainDir, "drain-started") + drainComplete := filepath.Join(drainDir, "drain-complete") + + // Simulate a long-running request + requestComplete := make(chan struct{}) + go func() { + 
logger.Info("Starting long-running request") + time.Sleep(10 * time.Second) // Longer than typical drain timeout + close(requestComplete) + logger.Info("Long-running request completed") + }() + + // Start shutdown + go func() { + os.WriteFile(drainStarted, []byte(""), 0600) + + // In real scenario, this would wait for requests or timeout + select { + case <-requestComplete: + logger.Info("Request completed, writing drain-complete") + case <-time.After(5 * time.Second): + logger.Info("Timeout waiting for request, writing drain-complete anyway") + } + + os.WriteFile(drainComplete, []byte(""), 0600) + }() + + // User container should still proceed + userExited := make(chan struct{}) + go func() { + for _, delay := range []int{1, 2, 4, 8} { + if _, err := os.Stat(drainStarted); err == nil { + // Wait for complete with timeout + for i := 0; i < 10; i++ { + if _, err := os.Stat(drainComplete); err == nil { + close(userExited) + return + } + time.Sleep(1 * time.Second) + } + } + time.Sleep(time.Duration(delay) * time.Second) + } + close(userExited) + }() + + select { + case <-userExited: + // User container should exit even with long-running request + case <-time.After(30 * time.Second): + t.Fatal("User container did not exit with long-running request") + } +} diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index d2a4308efa5c..9f103527bba6 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -30,6 +30,7 @@ import ( v1 "knative.dev/serving/pkg/apis/serving/v1" "knative.dev/serving/pkg/networking" "knative.dev/serving/pkg/queue" + "knative.dev/serving/pkg/queue/drain" "knative.dev/serving/pkg/reconciler/revision/config" "knative.dev/serving/pkg/reconciler/revision/resources/names" @@ -322,7 +323,7 @@ func buildLifecycleWithDrainWait(existingLifecycle *corev1.Lifecycle) *corev1.Li Exec: &corev1.ExecAction{ Command: []string{ "/bin/sh", "-c", - existingCommand + "; until [ -f /var/run/knative/drain-complete ]; do sleep 0.1; done", + drain.BuildDrainWaitScript(existingCommand), }, }, }, @@ -335,7 +336,7 @@ func buildLifecycleWithDrainWait(existingLifecycle *corev1.Lifecycle) *corev1.Li Exec: &corev1.ExecAction{ Command: []string{ "/bin/sh", "-c", - "until [ -f /var/run/knative/drain-complete ]; do sleep 0.1; done", + drain.BuildDrainWaitScript(""), }, }, }, diff --git a/pkg/reconciler/revision/resources/deploy_lifecycle_test.go b/pkg/reconciler/revision/resources/deploy_lifecycle_test.go index 0a04e43e58e6..3551fcef6338 100644 --- a/pkg/reconciler/revision/resources/deploy_lifecycle_test.go +++ b/pkg/reconciler/revision/resources/deploy_lifecycle_test.go @@ -21,10 +21,12 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" + "knative.dev/serving/pkg/queue/drain" ) func TestBuildLifecycleWithDrainWait(t *testing.T) { - drainCommand := "until [ -f /var/run/knative/drain-complete ]; do sleep 0.1; done" + // Use the same logic as production code + drainCommand := drain.BuildDrainWaitScript("") tests := []struct { name string @@ -45,7 +47,7 @@ func TestBuildLifecycleWithDrainWait(t *testing.T) { }, }, }, - want: []string{"/bin/sh", "-c", "/app/cleanup.sh; " + drainCommand}, + want: []string{"/bin/sh", "-c", drain.BuildDrainWaitScript("/app/cleanup.sh")}, }, { name: "existing HTTP GET", @@ -57,7 +59,7 @@ func TestBuildLifecycleWithDrainWait(t *testing.T) { }, }, }, - want: []string{"/bin/sh", "-c", "curl -f http://localhost:8080/shutdown; " + drainCommand}, + want: 
[]string{"/bin/sh", "-c", drain.BuildDrainWaitScript("curl -f http://localhost:8080/shutdown")}, }, { name: "existing HTTP GET without path", @@ -68,7 +70,7 @@ func TestBuildLifecycleWithDrainWait(t *testing.T) { }, }, }, - want: []string{"/bin/sh", "-c", "curl -f http://localhost:9090/; " + drainCommand}, + want: []string{"/bin/sh", "-c", drain.BuildDrainWaitScript("curl -f http://localhost:9090/")}, }, } diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index 122e68e8de7d..012adc9406fa 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -95,6 +95,16 @@ var ( FailureThreshold: 1, }, SecurityContext: queueSecurityContext, + Lifecycle: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{ + Command: []string{ + "/bin/sh", "-c", + "touch " + "/var/run/knative/drain-started", // Using string directly to match production + }, + }, + }, + }, Env: []corev1.EnvVar{{ Name: "SERVING_NAMESPACE", Value: "foo", // matches namespace diff --git a/pkg/reconciler/revision/resources/queue.go b/pkg/reconciler/revision/resources/queue.go index cf05b83eaf40..b3819cf388e6 100644 --- a/pkg/reconciler/revision/resources/queue.go +++ b/pkg/reconciler/revision/resources/queue.go @@ -40,6 +40,7 @@ import ( "knative.dev/serving/pkg/deployment" "knative.dev/serving/pkg/networking" "knative.dev/serving/pkg/queue" + "knative.dev/serving/pkg/queue/drain" "knative.dev/serving/pkg/queue/readiness" "knative.dev/serving/pkg/reconciler/revision/config" ) @@ -363,6 +364,16 @@ func makeQueueContainer(rev *v1.Revision, cfg *config.Config) (*corev1.Container StartupProbe: nil, ReadinessProbe: queueProxyReadinessProbe, SecurityContext: queueSecurityContext, + Lifecycle: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{ + Command: []string{ + "/bin/sh", "-c", + drain.QueueProxyPreStopScript, + }, + }, + }, + }, Env: []corev1.EnvVar{{ Name: "SERVING_NAMESPACE", Value: rev.Namespace, From eab43dea54a538466abba966e580f8a59a4282f1 Mon Sep 17 00:00:00 2001 From: Elijah Roussos Date: Tue, 30 Sep 2025 12:48:33 -0400 Subject: [PATCH 09/13] fix: inconsistent vendoring --- .../v2/internal/httprule/BUILD.bazel | 35 +++++ .../grpc-gateway/v2/runtime/BUILD.bazel | 97 ++++++++++++++ .../grpc-gateway/v2/utilities/BUILD.bazel | 31 +++++ .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 16 +++ .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 15 +++ .../apimachinery/pkg/api/resource/OWNERS | 10 ++ .../apimachinery/pkg/api/validation/OWNERS | 11 ++ .../apimachinery/pkg/apis/meta/v1/OWNERS | 17 +++ .../apimachinery/pkg/util/mergepatch/OWNERS | 6 + .../pkg/util/strategicpatch/OWNERS | 9 ++ .../apimachinery/pkg/util/validation/OWNERS | 11 ++ .../third_party/forked/golang/json/OWNERS | 6 + .../client-go/applyconfigurations/OWNERS | 6 + vendor/k8s.io/client-go/openapi/OWNERS | 4 + .../pkg/apis/clientauthentication/OWNERS | 8 ++ vendor/k8s.io/client-go/rest/OWNERS | 14 ++ vendor/k8s.io/client-go/tools/auth/OWNERS | 8 ++ vendor/k8s.io/client-go/tools/cache/OWNERS | 27 ++++ .../client-go/tools/leaderelection/OWNERS | 13 ++ vendor/k8s.io/client-go/tools/metrics/OWNERS | 5 + vendor/k8s.io/client-go/tools/record/OWNERS | 6 + vendor/k8s.io/client-go/transport/OWNERS | 8 ++ vendor/k8s.io/client-go/util/cert/OWNERS | 8 ++ vendor/k8s.io/client-go/util/keyutil/OWNERS | 6 + vendor/k8s.io/client-go/util/retry/OWNERS | 4 + vendor/k8s.io/code-generator/OWNERS | 16 +++ 
.../code-generator/cmd/client-gen/OWNERS | 11 ++ .../code-generator/cmd/go-to-protobuf/OWNERS | 6 + vendor/k8s.io/klog/OWNERS | 19 +++ vendor/k8s.io/klog/v2/OWNERS | 16 +++ .../kube-openapi/pkg/generators/rules/OWNERS | 4 + .../k8s.io/kube-openapi/pkg/util/proto/OWNERS | 2 + vendor/k8s.io/utils/pointer/OWNERS | 10 ++ vendor/k8s.io/utils/ptr/OWNERS | 10 ++ vendor/knative.dev/hack/OWNERS | 8 ++ vendor/knative.dev/hack/OWNERS_ALIASES | 126 ++++++++++++++++++ vendor/knative.dev/pkg/apis/OWNERS | 15 +++ vendor/knative.dev/pkg/apis/duck/OWNERS | 8 ++ vendor/knative.dev/pkg/controller/OWNERS | 7 + .../knative.dev/pkg/controller/controller.go | 2 +- .../pkg/controller/queue_metrics.go | 8 +- .../pkg/controller/two_lane_queue.go | 4 +- vendor/knative.dev/pkg/hack/update-codegen.sh | 1 - vendor/knative.dev/pkg/hack/verify-codegen.sh | 5 - vendor/knative.dev/pkg/network/transports.go | 3 +- vendor/knative.dev/pkg/reconciler/OWNERS | 7 + vendor/knative.dev/pkg/resolver/OWNERS | 8 ++ vendor/knative.dev/pkg/test/OWNERS | 10 ++ vendor/knative.dev/pkg/webhook/OWNERS | 7 + vendor/modules.txt | 6 +- vendor/sigs.k8s.io/json/OWNERS | 6 + vendor/sigs.k8s.io/randfill/OWNERS | 8 ++ vendor/sigs.k8s.io/randfill/OWNERS_ALIASES | 14 ++ vendor/sigs.k8s.io/yaml/OWNERS | 23 ++++ 54 files changed, 735 insertions(+), 16 deletions(-) create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS create mode 100644 vendor/k8s.io/client-go/applyconfigurations/OWNERS create mode 100644 vendor/k8s.io/client-go/openapi/OWNERS create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS create mode 100644 vendor/k8s.io/client-go/rest/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/auth/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/cache/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/record/OWNERS create mode 100644 vendor/k8s.io/client-go/transport/OWNERS create mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS create mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS create mode 100644 vendor/k8s.io/client-go/util/retry/OWNERS create mode 100644 vendor/k8s.io/code-generator/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS create mode 100644 vendor/k8s.io/klog/OWNERS create mode 100644 vendor/k8s.io/klog/v2/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS create mode 100644 
vendor/k8s.io/utils/pointer/OWNERS create mode 100644 vendor/k8s.io/utils/ptr/OWNERS create mode 100644 vendor/knative.dev/hack/OWNERS create mode 100644 vendor/knative.dev/hack/OWNERS_ALIASES create mode 100644 vendor/knative.dev/pkg/apis/OWNERS create mode 100644 vendor/knative.dev/pkg/apis/duck/OWNERS create mode 100644 vendor/knative.dev/pkg/controller/OWNERS create mode 100644 vendor/knative.dev/pkg/reconciler/OWNERS create mode 100644 vendor/knative.dev/pkg/resolver/OWNERS create mode 100644 vendor/knative.dev/pkg/test/OWNERS create mode 100644 vendor/knative.dev/pkg/webhook/OWNERS create mode 100644 vendor/sigs.k8s.io/json/OWNERS create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS_ALIASES create mode 100644 vendor/sigs.k8s.io/yaml/OWNERS diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel new file mode 100644 index 000000000000..b8fbb2b77c40 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "httprule", + srcs = [ + "compile.go", + "parse.go", + "types.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", + deps = ["//utilities"], +) + +go_test( + name = "httprule_test", + size = "small", + srcs = [ + "compile_test.go", + "parse_test.go", + "types_test.go", + ], + embed = [":httprule"], + deps = [ + "//utilities", + "@org_golang_google_grpc//grpclog", + ], +) + +alias( + name = "go_default_library", + actual = ":httprule", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel new file mode 100644 index 000000000000..a65d88eb8658 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -0,0 +1,97 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "runtime", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", + deps = [ + "//internal/httprule", + "//utilities", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +go_test( + name = "runtime_test", + size = "small", + srcs = [ + 
"context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_internal_test.go", + "mux_test.go", + "pattern_test.go", + "query_fuzz_test.go", + "query_test.go", + ], + embed = [":runtime"], + deps = [ + "//runtime/internal/examplepb", + "//utilities", + "@com_github_google_go_cmp//cmp", + "@com_github_google_go_cmp//cmp/cmpopts", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_genproto_googleapis_rpc//errdetails", + "@org_golang_google_genproto_googleapis_rpc//status", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +alias( + name = "go_default_library", + actual = ":runtime", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel new file mode 100644 index 000000000000..b89409465773 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "utilities", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "string_array_flag.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", +) + +go_test( + name = "utilities_test", + size = "small", + srcs = [ + "string_array_flag_test.go", + "trie_test.go", + ], + deps = [":utilities"], +) + +alias( + name = "go_default_library", + actual = ":utilities", + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS new file mode 100644 index 000000000000..1a9f5e7706b5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - saad-ali + - janetkuo + - tallclair + - dims + - cjcullen diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS new file mode 100644 index 000000000000..3bd8bf535e65 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -0,0 +1,15 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - janetkuo + - dims +emeritus_reviewers: + - ncdc diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS new file mode 100644 index 
000000000000..063fd285dad1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - smarterclayton + - wojtek-t + - derekwaynecarr + - mikedanese + - saad-ali + - janetkuo diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS new file mode 100644 index 000000000000..40237324761f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS new file mode 100644 index 000000000000..ec414a84b919 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -0,0 +1,17 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - smarterclayton + - wojtek-t + - deads2k + - caesarxuchao + - liggitt + - sttts + - luxas + - janetkuo + - justinsb + - soltysh + - dims +emeritus_reviewers: + - ncdc diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS new file mode 100644 index 000000000000..349bc69d6582 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - pwittrock +reviewers: + - apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS new file mode 100644 index 000000000000..73244449f2c0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse + - pwittrock +reviewers: + - apelisse +emeritus_approvers: + - mengqiy diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS new file mode 100644 index 000000000000..40237324761f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS new file mode 100644 index 000000000000..349bc69d6582 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - pwittrock +reviewers: + - apelisse diff --git a/vendor/k8s.io/client-go/applyconfigurations/OWNERS b/vendor/k8s.io/client-go/applyconfigurations/OWNERS new file mode 100644 index 000000000000..de4067fd35f9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse + - jpbetz + - api-approvers diff --git a/vendor/k8s.io/client-go/openapi/OWNERS b/vendor/k8s.io/client-go/openapi/OWNERS new file mode 100644 index 000000000000..e61009424261 --- /dev/null +++ 
b/vendor/k8s.io/client-go/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS new file mode 100644 index 000000000000..4dfbb98aec85 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# approval on api packages bubbles to api-approvers +reviewers: + - sig-auth-authenticators-approvers + - sig-auth-authenticators-reviewers +labels: + - sig/auth diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS new file mode 100644 index 000000000000..7b23294c45e5 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -0,0 +1,14 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - thockin + - smarterclayton + - caesarxuchao + - wojtek-t + - deads2k + - liggitt + - sttts + - luxas + - dims + - cjcullen + - lojies diff --git a/vendor/k8s.io/client-go/tools/auth/OWNERS b/vendor/k8s.io/client-go/tools/auth/OWNERS new file mode 100644 index 000000000000..c4ea6463df4d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/auth/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-auth-authenticators-approvers +reviewers: + - sig-auth-authenticators-reviewers +labels: + - sig/auth diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS new file mode 100644 index 000000000000..fc441e0efa8f --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -0,0 +1,27 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - thockin + - smarterclayton + - wojtek-t + - deads2k + - caesarxuchao + - liggitt +reviewers: + - thockin + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - janetkuo + - justinsb + - soltysh + - jsafrane + - dims + - ingvagabund +emeritus_approvers: + - lavalamp + - ncdc diff --git a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS new file mode 100644 index 000000000000..70787f2b524d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - mikedanese + - jefftree +reviewers: + - wojtek-t + - deads2k + - mikedanese + - ingvagabund + - jefftree +emeritus_approvers: + - timothysc diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS new file mode 100644 index 000000000000..2c9488a5fb2c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - wojtek-t + - jayunit100 diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS new file mode 100644 index 000000000000..8105c4fe087c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - sig-instrumentation-reviewers +approvers: + - sig-instrumentation-approvers diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS new file mode 100644 index 000000000000..34adee5ec539 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at 
https://go.k8s.io/owners + +reviewers: + - smarterclayton + - wojtek-t + - deads2k + - liggitt + - caesarxuchao diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS new file mode 100644 index 000000000000..3c3b94c58c3f --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-auth-certificates-approvers +reviewers: + - sig-auth-certificates-reviewers +labels: + - sig/auth diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS new file mode 100644 index 000000000000..e6d229d5dbe1 --- /dev/null +++ b/vendor/k8s.io/client-go/util/keyutil/OWNERS @@ -0,0 +1,6 @@ +approvers: + - sig-auth-certificates-approvers +reviewers: + - sig-auth-certificates-reviewers +labels: + - sig/auth diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS new file mode 100644 index 000000000000..75736b5aace8 --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - caesarxuchao diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS new file mode 100644 index 000000000000..d16e47e85d28 --- /dev/null +++ b/vendor/k8s.io/code-generator/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - deads2k + - jpbetz + - wojtek-t + - sttts +reviewers: + - deads2k + - wojtek-t + - sttts +labels: + - sig/api-machinery + - area/code-generation +emeritus_approvers: + - lavalamp diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS new file mode 100644 index 000000000000..967eb2a7bbc3 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - wojtek-t + - caesarxuchao +reviewers: + - wojtek-t + - caesarxuchao + - jpbetz +emeritus_approvers: + - lavalamp diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS new file mode 100644 index 000000000000..af7e2ec4c7d3 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - smarterclayton +reviewers: + - smarterclayton diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS new file mode 100644 index 000000000000..380e514f2807 --- /dev/null +++ b/vendor/k8s.io/klog/OWNERS @@ -0,0 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - jayunit100 + - hoegaarden + - andyxning + - neolit123 + - pohly + - yagonobre + - vincepri + - detiber +approvers: + - dims + - thockin + - justinsb + - tallclair + - piosz + - brancz + - DirectXMan12 + - lavalamp diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS new file mode 100644 index 000000000000..7500475a64af --- /dev/null +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - harshanarayana + - mengjiao-liu + - pohly +approvers: + - dims + - pohly + - thockin +emeritus_approvers: + - brancz + - justinsb + - lavalamp + - piosz + - serathius + - tallclair diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS new file mode 100644 index 000000000000..235bc545b88b 
--- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS @@ -0,0 +1,4 @@ +reviewers: +- roycaihw +approvers: +- roycaihw diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS new file mode 100644 index 000000000000..9621a6a3a4ac --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS @@ -0,0 +1,2 @@ +approvers: +- apelisse diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS new file mode 100644 index 000000000000..0d6392752af2 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/ptr/OWNERS b/vendor/k8s.io/utils/ptr/OWNERS new file mode 100644 index 000000000000..0d6392752af2 --- /dev/null +++ b/vendor/k8s.io/utils/ptr/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/knative.dev/hack/OWNERS b/vendor/knative.dev/hack/OWNERS new file mode 100644 index 000000000000..4d20bf8cffef --- /dev/null +++ b/vendor/knative.dev/hack/OWNERS @@ -0,0 +1,8 @@ +approvers: + - technical-oversight-committee + - productivity-writers + - knative-release-leads + +reviewers: + - productivity-writers + - productivity-reviewers diff --git a/vendor/knative.dev/hack/OWNERS_ALIASES b/vendor/knative.dev/hack/OWNERS_ALIASES new file mode 100644 index 000000000000..f2e303479b6d --- /dev/null +++ b/vendor/knative.dev/hack/OWNERS_ALIASES @@ -0,0 +1,126 @@ +# This file is auto-generated from peribolos. +# Do not modify this file, instead modify peribolos/knative.yaml + +aliases: + client-reviewers: [] + client-wg-leads: + - dsimansk + client-writers: + - dsimansk + docs-reviewers: + - nainaz + - skonto + docs-writers: + - skonto + eventing-reviewers: + - Leo6Leo + - aslom + - cali0707 + - creydr + eventing-wg-leads: + - creydr + - pierDipi + eventing-writers: + - Leo6Leo + - aliok + - cali0707 + - creydr + - matzew + - pierDipi + func-reviewers: + - jrangelramos + - nainaz + func-writers: + - gauron99 + - jrangelramos + - lkingland + - matejvasek + - matzew + - salaboy + functions-wg-leads: + - lkingland + - salaboy + knative-admin: + - aliok + - arsenetar + - cardil + - dprotaso + - dsimansk + - evankanderson + - gauron99 + - knative-automation + - knative-prow-releaser-robot + - knative-prow-robot + - knative-prow-updater-robot + - knative-test-reporter-robot + - matzew + - skonto + - upodroid + knative-release-leads: + - dprotaso + - dsimansk + - gauron99 + - skonto + knative-robots: + - knative-automation + - knative-prow-releaser-robot + - knative-prow-robot + - knative-prow-updater-robot + - knative-test-reporter-robot + operations-reviewers: + - aliok + - houshengbo + - matzew + operations-wg-leads: + - houshengbo + operations-writers: + - aliok + - houshengbo + - matzew + productivity-leads: + - cardil + - upodroid + productivity-reviewers: + - evankanderson + - mgencur + productivity-wg-leads: + - cardil + - upodroid + productivity-writers: + - cardil + - upodroid + security-wg-leads: + - davidhadas + - evankanderson + security-writers: + - davidhadas + - evankanderson + serving-approvers: + - dsimansk + - skonto + serving-reviewers: + - skonto + serving-triage: + - skonto + serving-wg-leads: + - dprotaso + serving-writers: + - dprotaso + - dsimansk + - 
skonto + steering-committee: + - aliok + - arsenetar + - dprotaso + - evankanderson + - matzew + ux-wg-leads: + - Leo6Leo + - cali0707 + - mmejia02 + - zainabhusain227 + ux-writers: + - Leo6Leo + - cali0707 + - mmejia02 + - zainabhusain227 diff --git a/vendor/knative.dev/pkg/apis/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS new file mode 100644 index 000000000000..13014203fc86 --- /dev/null +++ b/vendor/knative.dev/pkg/apis/OWNERS @@ -0,0 +1,15 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- technical-oversight-committee +- serving-wg-leads +- eventing-wg-leads + +reviewers: +- serving-writers +- eventing-writers +- eventing-reviewers +- serving-reviewers + +options: + no_parent_owners: true diff --git a/vendor/knative.dev/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS new file mode 100644 index 000000000000..af1eb05dac49 --- /dev/null +++ b/vendor/knative.dev/pkg/apis/duck/OWNERS @@ -0,0 +1,8 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- eventing-wg-leads + +reviewers: +- eventing-reviewers +- eventing-writers diff --git a/vendor/knative.dev/pkg/controller/OWNERS b/vendor/knative.dev/pkg/controller/OWNERS new file mode 100644 index 000000000000..64660c9e35d3 --- /dev/null +++ b/vendor/knative.dev/pkg/controller/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-writers + +reviewers: +- serving-reviewers diff --git a/vendor/knative.dev/pkg/controller/controller.go b/vendor/knative.dev/pkg/controller/controller.go index 7846399edf67..6091f55fd9b4 100644 --- a/vendor/knative.dev/pkg/controller/controller.go +++ b/vendor/knative.dev/pkg/controller/controller.go @@ -470,7 +470,7 @@ func (c *Impl) RunContext(ctx context.Context, threadiness int) error { } // Launch workers to process resources that get enqueued to our workqueue. - c.logger.Info("Starting controller and workers") + c.logger.Infow("Starting controller and workers", zap.Int("threadiness", threadiness)) for range threadiness { sg.Add(1) go func() { diff --git a/vendor/knative.dev/pkg/controller/queue_metrics.go b/vendor/knative.dev/pkg/controller/queue_metrics.go index dcd5889981ad..2e61e330ea51 100644 --- a/vendor/knative.dev/pkg/controller/queue_metrics.go +++ b/vendor/knative.dev/pkg/controller/queue_metrics.go @@ -88,13 +88,15 @@ func (m *queueMetrics) get(item any) { m.mu.Lock() defer m.mu.Unlock() - m.depth.Dec() - m.processingStartTimes[item] = m.clock.Now() - if startTime, exists := m.addTimes[item]; exists { + m.depth.Dec() m.latency.Observe(m.sinceInSeconds(startTime)) delete(m.addTimes, item) } + + if _, exists := m.processingStartTimes[item]; !exists { + m.processingStartTimes[item] = m.clock.Now() + } } func (m *queueMetrics) done(item any) { diff --git a/vendor/knative.dev/pkg/controller/two_lane_queue.go b/vendor/knative.dev/pkg/controller/two_lane_queue.go index 294255131887..0c1879ded805 100644 --- a/vendor/knative.dev/pkg/controller/two_lane_queue.go +++ b/vendor/knative.dev/pkg/controller/two_lane_queue.go @@ -223,9 +223,9 @@ func (q *twoLaneRateLimitingQueue) slowLane() workqueue.TypedInterface[any] { // It gets the item from fast lane if it has anything, alternatively // the slow lane. func (tlq *twoLaneQueue) Get() (any, bool) { - item, ok := tlq.consumerQueue.Get() + item, shutdown := tlq.consumerQueue.Get() tlq.metrics.get(item) - return item, ok + return item, shutdown } // Len returns the sum of lengths. 
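The rename from ok to shutdown in twoLaneQueue.Get above mirrors the contract of client-go's typed workqueue: the second return value reports that the queue has been shut down, not whether the returned item is valid. A minimal consumer sketch under that contract follows; the queue name, item string, and print step are illustrative only and are not part of this patch.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTyped[string]()
	q.Add("default/my-revision")

	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			// The boolean signals queue shutdown, never a bad item, so a
			// consumer checks it before touching the item at all.
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			fmt.Println("processing", item)
			// Done lets the queue re-admit the item if it was re-added
			// while being processed.
			q.Done(item)
		}
	}()

	// Blocks until in-flight items are marked Done, then unblocks Get
	// with shutdown == true.
	q.ShutDownWithDrain()
	<-done
}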
diff --git a/vendor/knative.dev/pkg/hack/update-codegen.sh b/vendor/knative.dev/pkg/hack/update-codegen.sh index 7e3ad4abd447..836c5ddadd76 100644 --- a/vendor/knative.dev/pkg/hack/update-codegen.sh +++ b/vendor/knative.dev/pkg/hack/update-codegen.sh @@ -73,7 +73,6 @@ go run k8s.io/code-generator/cmd/deepcopy-gen \ knative.dev/pkg/apis/duck/v1 \ knative.dev/pkg/tracker \ knative.dev/pkg/logging \ - knative.dev/pkg/metrics \ knative.dev/pkg/testing \ knative.dev/pkg/testing/duck \ knative.dev/pkg/webhook/resourcesemantics/conversion/internal diff --git a/vendor/knative.dev/pkg/hack/verify-codegen.sh b/vendor/knative.dev/pkg/hack/verify-codegen.sh index 59fbeea852f3..f5d36632501f 100644 --- a/vendor/knative.dev/pkg/hack/verify-codegen.sh +++ b/vendor/knative.dev/pkg/hack/verify-codegen.sh @@ -37,7 +37,6 @@ cp -aR \ "${REPO_ROOT_DIR}/go.sum" \ "${REPO_ROOT_DIR}/apis" \ "${REPO_ROOT_DIR}/logging" \ - "${REPO_ROOT_DIR}/metrics" \ "${REPO_ROOT_DIR}/testing" \ "${REPO_ROOT_DIR}/vendor" \ "${TMP_DIFFROOT}" @@ -55,9 +54,6 @@ diff -Naupr --no-dereference \ diff -Naupr --no-dereference \ "${REPO_ROOT_DIR}/logging" "${TMP_DIFFROOT}/logging" || ret=1 -diff -Naupr --no-dereference \ - "${REPO_ROOT_DIR}/metrics" "${TMP_DIFFROOT}/metrics" || ret=1 - diff -Naupr --no-dereference \ "${REPO_ROOT_DIR}/testing" "${TMP_DIFFROOT}/testing" || ret=1 @@ -69,7 +65,6 @@ rm -fr \ "${REPO_ROOT_DIR}/go.sum" \ "${REPO_ROOT_DIR}/apis" \ "${REPO_ROOT_DIR}/logging" \ - "${REPO_ROOT_DIR}/metrics" \ "${REPO_ROOT_DIR}/testing" \ "${REPO_ROOT_DIR}/vendor" diff --git a/vendor/knative.dev/pkg/network/transports.go b/vendor/knative.dev/pkg/network/transports.go index 1e9c6c219865..ce8a72b4ebc2 100644 --- a/vendor/knative.dev/pkg/network/transports.go +++ b/vendor/knative.dev/pkg/network/transports.go @@ -96,7 +96,8 @@ func dialBackOffHelper(ctx context.Context, network, address string, bo wait.Bac if tlsConf == nil { c, err = dialer.DialContext(ctx, network, address) } else { - c, err = tls.DialWithDialer(dialer, network, address, tlsConf) + d := tls.Dialer{NetDialer: dialer, Config: tlsConf} + c, err = d.DialContext(ctx, network, address) } if err != nil { var errNet net.Error diff --git a/vendor/knative.dev/pkg/reconciler/OWNERS b/vendor/knative.dev/pkg/reconciler/OWNERS new file mode 100644 index 000000000000..136197a30305 --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-writers + +reviewers: +- serving-writers diff --git a/vendor/knative.dev/pkg/resolver/OWNERS b/vendor/knative.dev/pkg/resolver/OWNERS new file mode 100644 index 000000000000..b5e9581f4ed0 --- /dev/null +++ b/vendor/knative.dev/pkg/resolver/OWNERS @@ -0,0 +1,8 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- eventing-writers + +reviewers: +- eventing-reviewers + diff --git a/vendor/knative.dev/pkg/test/OWNERS b/vendor/knative.dev/pkg/test/OWNERS new file mode 100644 index 000000000000..65aa9e7b118e --- /dev/null +++ b/vendor/knative.dev/pkg/test/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- productivity-writers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/vendor/knative.dev/pkg/webhook/OWNERS b/vendor/knative.dev/pkg/webhook/OWNERS new file mode 100644 index 000000000000..64660c9e35d3 --- /dev/null +++ b/vendor/knative.dev/pkg/webhook/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- serving-writers + +reviewers: +- serving-reviewers diff --git a/vendor/modules.txt b/vendor/modules.txt index ff23bcfc0302..19b44aab2cc6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1450,7 +1450,7 @@ k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/trace -# knative.dev/caching v0.0.0-20250821143751-b982aa0cd1c1 +# knative.dev/caching v0.0.0-20250909014531-e918af7eb00b ## explicit; go 1.24.0 knative.dev/caching/config knative.dev/caching/pkg/apis/caching @@ -1471,10 +1471,10 @@ knative.dev/caching/pkg/client/injection/informers/caching/v1alpha1/image/fake knative.dev/caching/pkg/client/injection/informers/factory knative.dev/caching/pkg/client/injection/informers/factory/fake knative.dev/caching/pkg/client/listers/caching/v1alpha1 -# knative.dev/hack v0.0.0-20250902154142-af735b2738d6 +# knative.dev/hack v0.0.0-20250902153942-1499de21e119 ## explicit; go 1.21 knative.dev/hack -# knative.dev/networking v0.0.0-20250821144952-042b64d7bbde +# knative.dev/networking v0.0.0-20250909015233-e3b68fc57bea ## explicit; go 1.24.0 knative.dev/networking/config knative.dev/networking/pkg diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS new file mode 100644 index 000000000000..a08a434e615e --- /dev/null +++ b/vendor/sigs.k8s.io/json/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - deads2k + - jpbetz + - liggitt diff --git a/vendor/sigs.k8s.io/randfill/OWNERS b/vendor/sigs.k8s.io/randfill/OWNERS new file mode 100644 index 000000000000..59f6a50f6b6f --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners +# See the OWNERS_ALIASES file at https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES for a list of members for each alias. + +approvers: + - sig-testing-leads + - thockin + +reviewers: [] diff --git a/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES b/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES new file mode 100644 index 000000000000..927f1209b1d1 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES @@ -0,0 +1,14 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file should be kept in sync with k/org. 
+ +aliases: + # Reference: https://github.com/kubernetes/org/blob/main/OWNERS_ALIASES + sig-testing-leads: + - BenTheElder + - alvaroaleman + - aojea + - cjwagner + - jbpratt + - michelle192837 + - pohly + - xmcqueen diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS new file mode 100644 index 000000000000..003a149e151e --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -0,0 +1,23 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- jpbetz +- smarterclayton +- deads2k +- sttts +- liggitt +reviewers: +- dims +- thockin +- jpbetz +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- mikedanese +- liggitt +- sttts +- tallclair +labels: +- sig/api-machinery From f26e7d4c87da5f6239d850bcf864a4a5c2f48d7a Mon Sep 17 00:00:00 2001 From: Elijah Roussos Date: Tue, 30 Sep 2025 15:13:31 -0400 Subject: [PATCH 10/13] chore: update codegen --- pkg/autoscaler/metrics/stat.pb.go | 3 +- test/test_images/grpc-ping/proto/ping.pb.go | 40 ++---- .../v2/internal/httprule/BUILD.bazel | 35 ----- .../grpc-gateway/v2/runtime/BUILD.bazel | 97 -------------- .../grpc-gateway/v2/utilities/BUILD.bazel | 31 ----- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 16 --- .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 15 --- .../apimachinery/pkg/api/resource/OWNERS | 10 -- .../apimachinery/pkg/api/validation/OWNERS | 11 -- .../apimachinery/pkg/apis/meta/v1/OWNERS | 17 --- .../apimachinery/pkg/util/mergepatch/OWNERS | 6 - .../pkg/util/strategicpatch/OWNERS | 9 -- .../apimachinery/pkg/util/validation/OWNERS | 11 -- .../third_party/forked/golang/json/OWNERS | 6 - .../client-go/applyconfigurations/OWNERS | 6 - vendor/k8s.io/client-go/openapi/OWNERS | 4 - .../pkg/apis/clientauthentication/OWNERS | 8 -- vendor/k8s.io/client-go/rest/OWNERS | 14 -- vendor/k8s.io/client-go/tools/auth/OWNERS | 8 -- vendor/k8s.io/client-go/tools/cache/OWNERS | 27 ---- .../client-go/tools/leaderelection/OWNERS | 13 -- vendor/k8s.io/client-go/tools/metrics/OWNERS | 5 - vendor/k8s.io/client-go/tools/record/OWNERS | 6 - vendor/k8s.io/client-go/transport/OWNERS | 8 -- vendor/k8s.io/client-go/util/cert/OWNERS | 8 -- vendor/k8s.io/client-go/util/keyutil/OWNERS | 6 - vendor/k8s.io/client-go/util/retry/OWNERS | 4 - vendor/k8s.io/code-generator/OWNERS | 16 --- .../code-generator/cmd/client-gen/OWNERS | 11 -- .../code-generator/cmd/go-to-protobuf/OWNERS | 6 - vendor/k8s.io/klog/OWNERS | 19 --- vendor/k8s.io/klog/v2/OWNERS | 16 --- .../kube-openapi/pkg/generators/rules/OWNERS | 4 - .../k8s.io/kube-openapi/pkg/util/proto/OWNERS | 2 - vendor/k8s.io/utils/pointer/OWNERS | 10 -- vendor/k8s.io/utils/ptr/OWNERS | 10 -- vendor/knative.dev/hack/OWNERS | 8 -- vendor/knative.dev/hack/OWNERS_ALIASES | 126 ------------------ vendor/knative.dev/pkg/apis/OWNERS | 15 --- vendor/knative.dev/pkg/apis/duck/OWNERS | 8 -- vendor/knative.dev/pkg/controller/OWNERS | 7 - vendor/knative.dev/pkg/reconciler/OWNERS | 7 - vendor/knative.dev/pkg/resolver/OWNERS | 8 -- vendor/knative.dev/pkg/test/OWNERS | 10 -- vendor/knative.dev/pkg/webhook/OWNERS | 7 - vendor/sigs.k8s.io/json/OWNERS | 6 - vendor/sigs.k8s.io/randfill/OWNERS | 8 -- vendor/sigs.k8s.io/randfill/OWNERS_ALIASES | 14 -- vendor/sigs.k8s.io/yaml/OWNERS | 23 ---- 49 files changed, 11 insertions(+), 754 deletions(-) delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel delete mode 100644 
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/OWNERS delete mode 100644 vendor/k8s.io/client-go/openapi/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS delete mode 100644 vendor/k8s.io/client-go/rest/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/auth/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/cache/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/leaderelection/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/record/OWNERS delete mode 100644 vendor/k8s.io/client-go/transport/OWNERS delete mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS delete mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS delete mode 100644 vendor/k8s.io/client-go/util/retry/OWNERS delete mode 100644 vendor/k8s.io/code-generator/OWNERS delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/OWNERS delete mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS delete mode 100644 vendor/k8s.io/klog/OWNERS delete mode 100644 vendor/k8s.io/klog/v2/OWNERS delete mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS delete mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS delete mode 100644 vendor/k8s.io/utils/pointer/OWNERS delete mode 100644 vendor/k8s.io/utils/ptr/OWNERS delete mode 100644 vendor/knative.dev/hack/OWNERS delete mode 100644 vendor/knative.dev/hack/OWNERS_ALIASES delete mode 100644 vendor/knative.dev/pkg/apis/OWNERS delete mode 100644 vendor/knative.dev/pkg/apis/duck/OWNERS delete mode 100644 vendor/knative.dev/pkg/controller/OWNERS delete mode 100644 vendor/knative.dev/pkg/reconciler/OWNERS delete mode 100644 vendor/knative.dev/pkg/resolver/OWNERS delete mode 100644 vendor/knative.dev/pkg/test/OWNERS delete mode 100644 vendor/knative.dev/pkg/webhook/OWNERS delete mode 100644 vendor/sigs.k8s.io/json/OWNERS delete mode 100644 vendor/sigs.k8s.io/randfill/OWNERS delete mode 100644 vendor/sigs.k8s.io/randfill/OWNERS_ALIASES delete mode 100644 vendor/sigs.k8s.io/yaml/OWNERS diff --git a/pkg/autoscaler/metrics/stat.pb.go b/pkg/autoscaler/metrics/stat.pb.go index 58024c8ed1a5..99df6f71699d 100644 --- a/pkg/autoscaler/metrics/stat.pb.go +++ b/pkg/autoscaler/metrics/stat.pb.go @@ -22,11 +22,10 @@ package metrics import ( encoding_binary "encoding/binary" fmt "fmt" + proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" - - proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. 
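The stat.pb.go hunk above, like the ping.pb.go hunks that follow, is pure regeneration churn: the gogo/protobuf generator emits imports as one flat block and one var statement per blank identifier, so rerunning codegen reverts the gofmt-style grouping introduced by an earlier hand edit. Both spellings compile identically; a sketch of the two equivalent forms (package and names illustrative, assuming the gogo/protobuf module is on the module path):

package main

import (
	fmt "fmt"

	proto "github.com/gogo/protobuf/proto"
)

// Grouped form, as gofmt-minded hand edits tend to write it.
var (
	_ = proto.Marshal
	_ = fmt.Errorf
)

// Flat form, as the generator writes it. The blank identifier may be
// declared any number of times, so both blocks can even coexist.
var _ = proto.Marshal
var _ = fmt.Errorf

func main() {}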
diff --git a/test/test_images/grpc-ping/proto/ping.pb.go b/test/test_images/grpc-ping/proto/ping.pb.go index 90b6f6b6b7e8..f1cf30f9c35a 100644 --- a/test/test_images/grpc-ping/proto/ping.pb.go +++ b/test/test_images/grpc-ping/proto/ping.pb.go @@ -22,22 +22,19 @@ package ping import ( context "context" fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. -var ( - _ = proto.Marshal - _ = fmt.Errorf - _ = math.Inf -) +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -55,11 +52,9 @@ func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor_baa7ad5f099fe3e5, []int{0} } - func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } - func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Request.Marshal(b, m, deterministic) @@ -72,15 +67,12 @@ func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } - func (m *Request) XXX_Merge(src proto.Message) { xxx_messageInfo_Request.Merge(m, src) } - func (m *Request) XXX_Size() int { return m.Size() } - func (m *Request) XXX_DiscardUnknown() { xxx_messageInfo_Request.DiscardUnknown(m) } @@ -104,11 +96,9 @@ func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor_baa7ad5f099fe3e5, []int{1} } - func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } - func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Response.Marshal(b, m, deterministic) @@ -121,15 +111,12 @@ func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } - func (m *Response) XXX_Merge(src proto.Message) { xxx_messageInfo_Response.Merge(m, src) } - func (m *Response) XXX_Size() int { return m.Size() } - func (m *Response) XXX_DiscardUnknown() { xxx_messageInfo_Response.DiscardUnknown(m) } @@ -169,10 +156,8 @@ var fileDescriptor_baa7ad5f099fe3e5 = []byte{ } // Reference imports to suppress errors if they are not otherwise used. -var ( - _ context.Context - _ grpc.ClientConn -) +var _ context.Context +var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. @@ -241,12 +226,12 @@ type PingServiceServer interface { } // UnimplementedPingServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedPingServiceServer struct{} +type UnimplementedPingServiceServer struct { +} func (*UnimplementedPingServiceServer) Ping(ctx context.Context, req *Request) (*Response, error) { return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") } - func (*UnimplementedPingServiceServer) PingStream(srv PingService_PingStreamServer) error { return status.Errorf(codes.Unimplemented, "method PingStream not implemented") } @@ -390,7 +375,6 @@ func encodeVarintPing(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } - func (m *Request) Size() (n int) { if m == nil { return 0 @@ -420,11 +404,9 @@ func (m *Response) Size() (n int) { func sovPing(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } - func sozPing(x uint64) (n int) { return sovPing(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } - func (m *Request) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -507,7 +489,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { } return nil } - func (m *Response) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -590,7 +571,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { } return nil } - func skipPing(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel deleted file mode 100644 index b8fbb2b77c40..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "httprule", - srcs = [ - "compile.go", - "parse.go", - "types.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", - deps = ["//utilities"], -) - -go_test( - name = "httprule_test", - size = "small", - srcs = [ - "compile_test.go", - "parse_test.go", - "types_test.go", - ], - embed = [":httprule"], - deps = [ - "//utilities", - "@org_golang_google_grpc//grpclog", - ], -) - -alias( - name = "go_default_library", - actual = ":httprule", - visibility = ["//:__subpackages__"], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel deleted file mode 100644 index a65d88eb8658..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ /dev/null @@ -1,97 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "runtime", - srcs = [ - "context.go", - "convert.go", - "doc.go", - "errors.go", - "fieldmask.go", - "handler.go", - "marshal_httpbodyproto.go", - "marshal_json.go", - "marshal_jsonpb.go", - "marshal_proto.go", - "marshaler.go", - "marshaler_registry.go", - "mux.go", - "pattern.go", - "proto2_convert.go", - "query.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", - deps = [ - "//internal/httprule", - "//utilities", - "@org_golang_google_genproto_googleapis_api//httpbody", - "@org_golang_google_grpc//codes", - "@org_golang_google_grpc//grpclog", - "@org_golang_google_grpc//health/grpc_health_v1", - "@org_golang_google_grpc//metadata", - "@org_golang_google_grpc//status", - "@org_golang_google_protobuf//encoding/protojson", - "@org_golang_google_protobuf//proto", - 
"@org_golang_google_protobuf//reflect/protoreflect", - "@org_golang_google_protobuf//reflect/protoregistry", - "@org_golang_google_protobuf//types/known/durationpb", - "@org_golang_google_protobuf//types/known/fieldmaskpb", - "@org_golang_google_protobuf//types/known/structpb", - "@org_golang_google_protobuf//types/known/timestamppb", - "@org_golang_google_protobuf//types/known/wrapperspb", - ], -) - -go_test( - name = "runtime_test", - size = "small", - srcs = [ - "context_test.go", - "convert_test.go", - "errors_test.go", - "fieldmask_test.go", - "handler_test.go", - "marshal_httpbodyproto_test.go", - "marshal_json_test.go", - "marshal_jsonpb_test.go", - "marshal_proto_test.go", - "marshaler_registry_test.go", - "mux_internal_test.go", - "mux_test.go", - "pattern_test.go", - "query_fuzz_test.go", - "query_test.go", - ], - embed = [":runtime"], - deps = [ - "//runtime/internal/examplepb", - "//utilities", - "@com_github_google_go_cmp//cmp", - "@com_github_google_go_cmp//cmp/cmpopts", - "@org_golang_google_genproto_googleapis_api//httpbody", - "@org_golang_google_genproto_googleapis_rpc//errdetails", - "@org_golang_google_genproto_googleapis_rpc//status", - "@org_golang_google_grpc//:grpc", - "@org_golang_google_grpc//codes", - "@org_golang_google_grpc//health/grpc_health_v1", - "@org_golang_google_grpc//metadata", - "@org_golang_google_grpc//status", - "@org_golang_google_protobuf//encoding/protojson", - "@org_golang_google_protobuf//proto", - "@org_golang_google_protobuf//testing/protocmp", - "@org_golang_google_protobuf//types/known/durationpb", - "@org_golang_google_protobuf//types/known/emptypb", - "@org_golang_google_protobuf//types/known/fieldmaskpb", - "@org_golang_google_protobuf//types/known/structpb", - "@org_golang_google_protobuf//types/known/timestamppb", - "@org_golang_google_protobuf//types/known/wrapperspb", - ], -) - -alias( - name = "go_default_library", - actual = ":runtime", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel deleted file mode 100644 index b89409465773..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "utilities", - srcs = [ - "doc.go", - "pattern.go", - "readerfactory.go", - "string_array_flag.go", - "trie.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", -) - -go_test( - name = "utilities_test", - size = "small", - srcs = [ - "string_array_flag_test.go", - "trie_test.go", - ], - deps = [":utilities"], -) - -alias( - name = "go_default_library", - actual = ":utilities", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS deleted file mode 100644 index 1a9f5e7706b5..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ /dev/null @@ -1,16 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - thockin - - smarterclayton - - wojtek-t - - deads2k - - derekwaynecarr - - caesarxuchao - - mikedanese - - liggitt - - saad-ali - - janetkuo - - tallclair - - dims - - cjcullen diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS deleted file mode 100644 index 
3bd8bf535e65..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ /dev/null @@ -1,15 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - thockin - - smarterclayton - - wojtek-t - - deads2k - - derekwaynecarr - - caesarxuchao - - mikedanese - - liggitt - - janetkuo - - dims -emeritus_reviewers: - - ncdc diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS deleted file mode 100644 index 063fd285dad1..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - thockin - - smarterclayton - - wojtek-t - - derekwaynecarr - - mikedanese - - saad-ali - - janetkuo diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS deleted file mode 100644 index 40237324761f..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS +++ /dev/null @@ -1,11 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -# Disable inheritance as this is an api owners file -options: - no_parent_owners: true -approvers: - - api-approvers -reviewers: - - api-reviewers -labels: - - kind/api-change diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS deleted file mode 100644 index ec414a84b919..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ /dev/null @@ -1,17 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - thockin - - smarterclayton - - wojtek-t - - deads2k - - caesarxuchao - - liggitt - - sttts - - luxas - - janetkuo - - justinsb - - soltysh - - dims -emeritus_reviewers: - - ncdc diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS deleted file mode 100644 index 349bc69d6582..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - pwittrock -reviewers: - - apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS deleted file mode 100644 index 73244449f2c0..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - apelisse - - pwittrock -reviewers: - - apelisse -emeritus_approvers: - - mengqiy diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS deleted file mode 100644 index 40237324761f..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS +++ /dev/null @@ -1,11 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -# Disable inheritance as this is an api owners file -options: - no_parent_owners: true -approvers: - - api-approvers -reviewers: - - api-reviewers -labels: - - kind/api-change diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS deleted file mode 100644 index 349bc69d6582..000000000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - pwittrock -reviewers: - - apelisse diff --git 
a/vendor/k8s.io/client-go/applyconfigurations/OWNERS b/vendor/k8s.io/client-go/applyconfigurations/OWNERS deleted file mode 100644 index de4067fd35f9..000000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - apelisse - - jpbetz - - api-approvers diff --git a/vendor/k8s.io/client-go/openapi/OWNERS b/vendor/k8s.io/client-go/openapi/OWNERS deleted file mode 100644 index e61009424261..000000000000 --- a/vendor/k8s.io/client-go/openapi/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - apelisse diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS deleted file mode 100644 index 4dfbb98aec85..000000000000 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -# approval on api packages bubbles to api-approvers -reviewers: - - sig-auth-authenticators-approvers - - sig-auth-authenticators-reviewers -labels: - - sig/auth diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS deleted file mode 100644 index 7b23294c45e5..000000000000 --- a/vendor/k8s.io/client-go/rest/OWNERS +++ /dev/null @@ -1,14 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - thockin - - smarterclayton - - caesarxuchao - - wojtek-t - - deads2k - - liggitt - - sttts - - luxas - - dims - - cjcullen - - lojies diff --git a/vendor/k8s.io/client-go/tools/auth/OWNERS b/vendor/k8s.io/client-go/tools/auth/OWNERS deleted file mode 100644 index c4ea6463df4d..000000000000 --- a/vendor/k8s.io/client-go/tools/auth/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - sig-auth-authenticators-approvers -reviewers: - - sig-auth-authenticators-reviewers -labels: - - sig/auth diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS deleted file mode 100644 index fc441e0efa8f..000000000000 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ /dev/null @@ -1,27 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - thockin - - smarterclayton - - wojtek-t - - deads2k - - caesarxuchao - - liggitt -reviewers: - - thockin - - smarterclayton - - wojtek-t - - deads2k - - derekwaynecarr - - caesarxuchao - - mikedanese - - liggitt - - janetkuo - - justinsb - - soltysh - - jsafrane - - dims - - ingvagabund -emeritus_approvers: - - lavalamp - - ncdc diff --git a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS deleted file mode 100644 index 70787f2b524d..000000000000 --- a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS +++ /dev/null @@ -1,13 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - mikedanese - - jefftree -reviewers: - - wojtek-t - - deads2k - - mikedanese - - ingvagabund - - jefftree -emeritus_approvers: - - timothysc diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS deleted file mode 100644 index 2c9488a5fb2c..000000000000 --- a/vendor/k8s.io/client-go/tools/metrics/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - wojtek-t - - jayunit100 diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS 
deleted file mode 100644
index 8105c4fe087c..000000000000
--- a/vendor/k8s.io/client-go/tools/record/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-  - sig-instrumentation-reviewers
-approvers:
-  - sig-instrumentation-approvers
diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS
deleted file mode 100644
index 34adee5ec539..000000000000
--- a/vendor/k8s.io/client-go/transport/OWNERS
+++ /dev/null
@@ -1,8 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-  - smarterclayton
-  - wojtek-t
-  - deads2k
-  - liggitt
-  - caesarxuchao
diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS
deleted file mode 100644
index 3c3b94c58c3f..000000000000
--- a/vendor/k8s.io/client-go/util/cert/OWNERS
+++ /dev/null
@@ -1,8 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - sig-auth-certificates-approvers
-reviewers:
-  - sig-auth-certificates-reviewers
-labels:
-  - sig/auth
diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS
deleted file mode 100644
index e6d229d5dbe1..000000000000
--- a/vendor/k8s.io/client-go/util/keyutil/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-approvers:
-  - sig-auth-certificates-approvers
-reviewers:
-  - sig-auth-certificates-reviewers
-labels:
-  - sig/auth
diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS
deleted file mode 100644
index 75736b5aace8..000000000000
--- a/vendor/k8s.io/client-go/util/retry/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-  - caesarxuchao
diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS
deleted file mode 100644
index d16e47e85d28..000000000000
--- a/vendor/k8s.io/code-generator/OWNERS
+++ /dev/null
@@ -1,16 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - deads2k
-  - jpbetz
-  - wojtek-t
-  - sttts
-reviewers:
-  - deads2k
-  - wojtek-t
-  - sttts
-labels:
-  - sig/api-machinery
-  - area/code-generation
-emeritus_approvers:
-  - lavalamp
diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS
deleted file mode 100644
index 967eb2a7bbc3..000000000000
--- a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS
+++ /dev/null
@@ -1,11 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - wojtek-t
-  - caesarxuchao
-reviewers:
-  - wojtek-t
-  - caesarxuchao
-  - jpbetz
-emeritus_approvers:
-  - lavalamp
diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS
deleted file mode 100644
index af7e2ec4c7d3..000000000000
--- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - smarterclayton
-reviewers:
-  - smarterclayton
diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS
deleted file mode 100644
index 380e514f2807..000000000000
--- a/vendor/k8s.io/klog/OWNERS
+++ /dev/null
@@ -1,19 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-reviewers:
-  - jayunit100
-  - hoegaarden
-  - andyxning
-  - neolit123
-  - pohly
-  - yagonobre
-  - vincepri
-  - detiber
-approvers:
-  - dims
-  - thockin
-  - justinsb
-  - tallclair
-  - piosz
-  - brancz
-  - DirectXMan12
-  - lavalamp
diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS
deleted file mode 100644
index 7500475a64af..000000000000
--- a/vendor/k8s.io/klog/v2/OWNERS
+++ /dev/null
@@ -1,16 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-reviewers:
-  - harshanarayana
-  - mengjiao-liu
-  - pohly
-approvers:
-  - dims
-  - pohly
-  - thockin
-emeritus_approvers:
-  - brancz
-  - justinsb
-  - lavalamp
-  - piosz
-  - serathius
-  - tallclair
diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS
deleted file mode 100644
index 235bc545b88b..000000000000
--- a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-reviewers:
-- roycaihw
-approvers:
-- roycaihw
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS
deleted file mode 100644
index 9621a6a3a4ac..000000000000
--- a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-approvers:
-- apelisse
diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS
deleted file mode 100644
index 0d6392752af2..000000000000
--- a/vendor/k8s.io/utils/pointer/OWNERS
+++ /dev/null
@@ -1,10 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- apelisse
-- stewart-yu
-- thockin
-reviewers:
-- apelisse
-- stewart-yu
-- thockin
diff --git a/vendor/k8s.io/utils/ptr/OWNERS b/vendor/k8s.io/utils/ptr/OWNERS
deleted file mode 100644
index 0d6392752af2..000000000000
--- a/vendor/k8s.io/utils/ptr/OWNERS
+++ /dev/null
@@ -1,10 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- apelisse
-- stewart-yu
-- thockin
-reviewers:
-- apelisse
-- stewart-yu
-- thockin
diff --git a/vendor/knative.dev/hack/OWNERS b/vendor/knative.dev/hack/OWNERS
deleted file mode 100644
index 4d20bf8cffef..000000000000
--- a/vendor/knative.dev/hack/OWNERS
+++ /dev/null
@@ -1,8 +0,0 @@
-approvers:
-  - technical-oversight-committee
-  - productivity-writers
-  - knative-release-leads
-
-reviewers:
-  - productivity-writers
-  - productivity-reviewers
diff --git a/vendor/knative.dev/hack/OWNERS_ALIASES b/vendor/knative.dev/hack/OWNERS_ALIASES
deleted file mode 100644
index f2e303479b6d..000000000000
--- a/vendor/knative.dev/hack/OWNERS_ALIASES
+++ /dev/null
@@ -1,126 +0,0 @@
-# This file is auto-generated from peribolos.
-# Do not modify this file, instead modify peribolos/knative.yaml
-
-aliases:
-  client-reviewers: []
-  client-wg-leads:
-  - dsimansk
-  client-writers:
-  - dsimansk
-  docs-reviewers:
-  - nainaz
-  - skonto
-  docs-writers:
-  - skonto
-  eventing-reviewers:
-  - Leo6Leo
-  - aslom
-  - cali0707
-  - creydr
-  eventing-wg-leads:
-  - creydr
-  - pierDipi
-  eventing-writers:
-  - Leo6Leo
-  - aliok
-  - cali0707
-  - creydr
-  - matzew
-  - pierDipi
-  func-reviewers:
-  - jrangelramos
-  - nainaz
-  func-writers:
-  - gauron99
-  - jrangelramos
-  - lkingland
-  - matejvasek
-  - matzew
-  - salaboy
-  functions-wg-leads:
-  - lkingland
-  - salaboy
-  knative-admin:
-  - aliok
-  - arsenetar
-  - cardil
-  - dprotaso
-  - dsimansk
-  - evankanderson
-  - gauron99
-  - knative-automation
-  - knative-prow-releaser-robot
-  - knative-prow-robot
-  - knative-prow-updater-robot
-  - knative-test-reporter-robot
-  - matzew
-  - skonto
-  - upodroid
-  knative-release-leads:
-  - dprotaso
-  - dsimansk
-  - gauron99
-  - skonto
-  knative-robots:
-  - knative-automation
-  - knative-prow-releaser-robot
-  - knative-prow-robot
-  - knative-prow-updater-robot
-  - knative-test-reporter-robot
-  operations-reviewers:
-  - aliok
-  - houshengbo
-  - matzew
-  operations-wg-leads:
-  - houshengbo
-  operations-writers:
-  - aliok
-  - houshengbo
-  - matzew
-  productivity-leads:
-  - cardil
-  - upodroid
-  productivity-reviewers:
-  - evankanderson
-  - mgencur
-  productivity-wg-leads:
-  - cardil
-  - upodroid
-  productivity-writers:
-  - cardil
-  - upodroid
-  security-wg-leads:
-  - davidhadas
-  - evankanderson
-  security-writers:
-  - davidhadas
-  - evankanderson
-  serving-approvers:
-  - dsimansk
-  - skonto
-  serving-reviewers:
-  - skonto
-  serving-triage:
-  - skonto
-  serving-wg-leads:
-  - dprotaso
-  serving-writers:
-  - dprotaso
-  - dsimansk
-  - skonto
-  steering-committee:
-  - aliok
-  - arsenetar
-  - dprotaso
-  - evankanderson
-  - matzew
-  ux-wg-leads:
-  - Leo6Leo
-  - cali0707
-  - mmejia02
-  - zainabhusain227
-  ux-writers:
-  - Leo6Leo
-  - cali0707
-  - mmejia02
-  - zainabhusain227
diff --git a/vendor/knative.dev/pkg/apis/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS
deleted file mode 100644
index 13014203fc86..000000000000
--- a/vendor/knative.dev/pkg/apis/OWNERS
+++ /dev/null
@@ -1,15 +0,0 @@
-# The OWNERS file is used by prow to automatically merge approved PRs.
-
-approvers:
-- technical-oversight-committee
-- serving-wg-leads
-- eventing-wg-leads
-
-reviewers:
-- serving-writers
-- eventing-writers
-- eventing-reviewers
-- serving-reviewers
-
-options:
-  no_parent_owners: true
diff --git a/vendor/knative.dev/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS
deleted file mode 100644
index af1eb05dac49..000000000000
--- a/vendor/knative.dev/pkg/apis/duck/OWNERS
+++ /dev/null
@@ -1,8 +0,0 @@
-# The OWNERS file is used by prow to automatically merge approved PRs.
-
-approvers:
-- eventing-wg-leads
-
-reviewers:
-- eventing-reviewers
-- eventing-writers
diff --git a/vendor/knative.dev/pkg/controller/OWNERS b/vendor/knative.dev/pkg/controller/OWNERS
deleted file mode 100644
index 64660c9e35d3..000000000000
--- a/vendor/knative.dev/pkg/controller/OWNERS
+++ /dev/null
@@ -1,7 +0,0 @@
-# The OWNERS file is used by prow to automatically merge approved PRs.
-
-approvers:
-- serving-writers
-
-reviewers:
-- serving-reviewers
diff --git a/vendor/knative.dev/pkg/reconciler/OWNERS b/vendor/knative.dev/pkg/reconciler/OWNERS
deleted file mode 100644
index 136197a30305..000000000000
--- a/vendor/knative.dev/pkg/reconciler/OWNERS
+++ /dev/null
@@ -1,7 +0,0 @@
-# The OWNERS file is used by prow to automatically merge approved PRs.
-
-approvers:
-- serving-writers
-
-reviewers:
-- serving-writers
diff --git a/vendor/knative.dev/pkg/resolver/OWNERS b/vendor/knative.dev/pkg/resolver/OWNERS
deleted file mode 100644
index b5e9581f4ed0..000000000000
--- a/vendor/knative.dev/pkg/resolver/OWNERS
+++ /dev/null
@@ -1,8 +0,0 @@
-# The OWNERS file is used by prow to automatically merge approved PRs.
-
-approvers:
-- eventing-writers
-
-reviewers:
-- eventing-reviewers
-
diff --git a/vendor/knative.dev/pkg/test/OWNERS b/vendor/knative.dev/pkg/test/OWNERS
deleted file mode 100644
index 65aa9e7b118e..000000000000
--- a/vendor/knative.dev/pkg/test/OWNERS
+++ /dev/null
@@ -1,10 +0,0 @@
-# The OWNERS file is used by prow to automatically merge approved PRs.
-
-approvers:
-- productivity-writers
-
-reviewers:
-- productivity-reviewers
-
-labels:
-- area/test-and-release
diff --git a/vendor/knative.dev/pkg/webhook/OWNERS b/vendor/knative.dev/pkg/webhook/OWNERS
deleted file mode 100644
index 64660c9e35d3..000000000000
--- a/vendor/knative.dev/pkg/webhook/OWNERS
+++ /dev/null
@@ -1,7 +0,0 @@
-# The OWNERS file is used by prow to automatically merge approved PRs.
-
-approvers:
-- serving-writers
-
-reviewers:
-- serving-reviewers
diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS
deleted file mode 100644
index a08a434e615e..000000000000
--- a/vendor/sigs.k8s.io/json/OWNERS
+++ /dev/null
@@ -1,6 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - deads2k
-  - jpbetz
-  - liggitt
diff --git a/vendor/sigs.k8s.io/randfill/OWNERS b/vendor/sigs.k8s.io/randfill/OWNERS
deleted file mode 100644
index 59f6a50f6b6f..000000000000
--- a/vendor/sigs.k8s.io/randfill/OWNERS
+++ /dev/null
@@ -1,8 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-# See the OWNERS_ALIASES file at https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES for a list of members for each alias.
-
-approvers:
-  - sig-testing-leads
-  - thockin
-
-reviewers: []
diff --git a/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES b/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES
deleted file mode 100644
index 927f1209b1d1..000000000000
--- a/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES
+++ /dev/null
@@ -1,14 +0,0 @@
-# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
-# This file should be kept in sync with k/org.
-
-aliases:
-  # Reference: https://github.com/kubernetes/org/blob/main/OWNERS_ALIASES
-  sig-testing-leads:
-  - BenTheElder
-  - alvaroaleman
-  - aojea
-  - cjwagner
-  - jbpratt
-  - michelle192837
-  - pohly
-  - xmcqueen
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS
deleted file mode 100644
index 003a149e151e..000000000000
--- a/vendor/sigs.k8s.io/yaml/OWNERS
+++ /dev/null
@@ -1,23 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- dims
-- jpbetz
-- smarterclayton
-- deads2k
-- sttts
-- liggitt
-reviewers:
-- dims
-- thockin
-- jpbetz
-- smarterclayton
-- wojtek-t
-- deads2k
-- derekwaynecarr
-- mikedanese
-- liggitt
-- sttts
-- tallclair
-labels:
-- sig/api-machinery

From eb165e51098032f9ed09717996cb0e90ef77c934 Mon Sep 17 00:00:00 2001
From: Elijah Roussos
Date: Tue, 30 Sep 2025 15:28:38 -0400
Subject: [PATCH 11/13] fix: move compose handler after drainer definition

---
 pkg/queue/sharedmain/handlers.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/queue/sharedmain/handlers.go b/pkg/queue/sharedmain/handlers.go
index 44c329bcf746..d4ced9b40049 100644
--- a/pkg/queue/sharedmain/handlers.go
+++ b/pkg/queue/sharedmain/handlers.go
@@ -87,8 +87,6 @@ func mainHandler(
 	composedHandler = queue.NewRouteTagHandler(composedHandler)
 	composedHandler = withFullDuplex(composedHandler, env.EnableHTTPFullDuplex, logger)
 
-	composedHandler = withRequestCounter(composedHandler, pendingRequests)
-
 	drainer := &pkghandler.Drainer{
 		QuietPeriod: drainSleepDuration,
 		// Add Activator probe header to the drainer so it can handle probes directly from activator
@@ -98,6 +96,8 @@
 	}
 	composedHandler = drainer
 
+	composedHandler = withRequestCounter(composedHandler, pendingRequests)
+
 	if env.Observability.EnableRequestLog {
 		// We want to capture the probes/healthchecks in the request logs.
 		// Hence we need to have RequestLogHandler be the first one.
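
The reorder above matters because the queue-proxy composes its handler chain by successive wrapping: the wrapper applied last becomes the outermost layer and sees each request first. Moving withRequestCounter after the drainer assignment therefore puts the counter in front of the drainer, which is why the follow-up patches must teach the counter to ignore probe traffic. A minimal standalone sketch of the composition pattern (hypothetical names, not the actual Knative wiring):

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    )

    // wrap names a middleware layer so the traversal order is visible.
    func wrap(name string, next http.Handler) http.Handler {
    	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		fmt.Println("enter", name)
    		next.ServeHTTP(w, r)
    	})
    }

    func main() {
    	var h http.Handler = http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
    	h = wrap("drainer", h) // applied first -> inner layer
    	h = wrap("counter", h) // applied last  -> outermost, runs first
    	h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/", nil))
    	// Prints "enter counter" before "enter drainer".
    }
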
From e71e1cb5e823a0971e50fdf9f1962d3d4bc70745 Mon Sep 17 00:00:00 2001
From: Elijah Roussos
Date: Tue, 30 Sep 2025 16:00:39 -0400
Subject: [PATCH 12/13] fix: skip knative internal requests for the pending request counter

---
 pkg/queue/sharedmain/handlers.go      |  7 ++++++-
 pkg/queue/sharedmain/handlers_test.go | 26 ++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/pkg/queue/sharedmain/handlers.go b/pkg/queue/sharedmain/handlers.go
index d4ced9b40049..6f3d58b584e3 100644
--- a/pkg/queue/sharedmain/handlers.go
+++ b/pkg/queue/sharedmain/handlers.go
@@ -166,7 +166,12 @@ func withFullDuplex(h http.Handler, enableFullDuplex bool, logger *zap.SugaredLo
 
 func withRequestCounter(h http.Handler, pendingRequests *atomic.Int32) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r.Header.Get(network.ProbeHeaderName) != network.ProbeHeaderValue && !strings.HasPrefix(r.Header.Get("User-Agent"), "kube-probe/") {
+		userAgent := r.Header.Get("User-Agent")
+		// Skip counting probe requests and internal Knative probes
+		if r.Header.Get(network.ProbeHeaderName) != network.ProbeHeaderValue &&
+			!strings.HasPrefix(userAgent, "kube-probe/") &&
+			!strings.HasPrefix(userAgent, netheader.ActivatorUserAgent) &&
+			!strings.HasPrefix(userAgent, netheader.AutoscalingUserAgent) {
 			pendingRequests.Add(1)
 			defer pendingRequests.Add(-1)
 		}
diff --git a/pkg/queue/sharedmain/handlers_test.go b/pkg/queue/sharedmain/handlers_test.go
index 67ca8969a7eb..a5ad6f0215cf 100644
--- a/pkg/queue/sharedmain/handlers_test.go
+++ b/pkg/queue/sharedmain/handlers_test.go
@@ -237,6 +237,32 @@ func TestWithRequestCounter(t *testing.T) {
 		}
 	})
 
+	t.Run("skips counter for Activator probe requests", func(t *testing.T) {
+		req := httptest.NewRequest(http.MethodGet, "/", nil)
+		req.Header.Set("User-Agent", "Knative-Activator-Probe")
+		w := httptest.NewRecorder()
+
+		wrappedHandler.ServeHTTP(w, req)
+
+		// Check that counter was not incremented
+		if pendingRequests.Load() != 0 {
+			t.Errorf("Expected pending requests to remain 0 for Activator probe, got %d", pendingRequests.Load())
+		}
+	})
+
+	t.Run("skips counter for Autoscaling probe requests", func(t *testing.T) {
+		req := httptest.NewRequest(http.MethodGet, "/", nil)
+		req.Header.Set("User-Agent", "Knative-Autoscaling-Probe")
+		w := httptest.NewRecorder()
+
+		wrappedHandler.ServeHTTP(w, req)
+
+		// Check that counter was not incremented
+		if pendingRequests.Load() != 0 {
+			t.Errorf("Expected pending requests to remain 0 for Autoscaling probe, got %d", pendingRequests.Load())
+		}
+	})
+
 	t.Run("handles concurrent requests correctly", func(t *testing.T) {
 		// Reset counter
 		pendingRequests.Store(0)
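
The mechanism in this patch (and its generalization in the patch below) is a plain User-Agent prefix check. A self-contained sketch of that check; the prefix literals mirror the test fixtures above, while the real code reads them from the netheader constants:

    package main

    import (
    	"fmt"
    	"net/http"
    	"strings"
    )

    // knativeProbePrefixes mirrors the User-Agent values exercised by the
    // tests above (illustrative copies, not the netheader constants).
    var knativeProbePrefixes = []string{
    	"Knative-Activator-Probe",
    	"Knative-Autoscaling-Probe",
    }

    // isInternalProbe reports whether the request looks like an internal
    // Knative probe that should not count as a pending user request.
    func isInternalProbe(r *http.Request) bool {
    	ua := r.Header.Get("User-Agent")
    	for _, p := range knativeProbePrefixes {
    		if strings.HasPrefix(ua, p) {
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	r, _ := http.NewRequest(http.MethodGet, "/", nil)
    	r.Header.Set("User-Agent", "Knative-Activator-Probe")
    	fmt.Println(isInternalProbe(r)) // true: skipped by the counter
    }
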
"knative.dev/serving/pkg/activator" pkghttp "knative.dev/serving/pkg/http" @@ -164,14 +163,25 @@ func withFullDuplex(h http.Handler, enableFullDuplex bool, logger *zap.SugaredLo }) } +func isProbeRequest(r *http.Request) bool { + // Check standard probes (K8s and Knative probe headers) + if netheader.IsProbe(r) { + return true + } + + // Check all Knative internal probe user agents that should not be counted + // as pending requests (matching what the Drainer filters) + userAgent := r.Header.Get("User-Agent") + return strings.HasPrefix(userAgent, netheader.ActivatorUserAgent) || + strings.HasPrefix(userAgent, netheader.AutoscalingUserAgent) || + strings.HasPrefix(userAgent, netheader.QueueProxyUserAgent) || + strings.HasPrefix(userAgent, netheader.IngressReadinessUserAgent) +} + func withRequestCounter(h http.Handler, pendingRequests *atomic.Int32) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - userAgent := r.Header.Get("User-Agent") - // Skip counting probe requests and internal Knative probes - if r.Header.Get(network.ProbeHeaderName) != network.ProbeHeaderValue && - !strings.HasPrefix(userAgent, "kube-probe/") && - !strings.HasPrefix(userAgent, netheader.ActivatorUserAgent) && - !strings.HasPrefix(userAgent, netheader.AutoscalingUserAgent) { + // Only count non-probe requests as pending + if !isProbeRequest(r) { pendingRequests.Add(1) defer pendingRequests.Add(-1) } diff --git a/pkg/queue/sharedmain/handlers_test.go b/pkg/queue/sharedmain/handlers_test.go index a5ad6f0215cf..b035c6eee373 100644 --- a/pkg/queue/sharedmain/handlers_test.go +++ b/pkg/queue/sharedmain/handlers_test.go @@ -26,7 +26,7 @@ import ( "time" "go.uber.org/zap" - "knative.dev/pkg/network" + netheader "knative.dev/networking/pkg/http/header" pkghandler "knative.dev/pkg/network/handlers" "knative.dev/serving/pkg/queue" ) @@ -213,7 +213,7 @@ func TestWithRequestCounter(t *testing.T) { t.Run("skips counter for probe requests", func(t *testing.T) { req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(network.ProbeHeaderName, network.ProbeHeaderValue) + req.Header.Set(netheader.ProbeKey, netheader.ProbeValue) w := httptest.NewRecorder() wrappedHandler.ServeHTTP(w, req) @@ -263,6 +263,32 @@ func TestWithRequestCounter(t *testing.T) { } }) + t.Run("skips counter for QueueProxy probe requests", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set("User-Agent", "Knative-Queue-Proxy-Probe") + w := httptest.NewRecorder() + + wrappedHandler.ServeHTTP(w, req) + + // Check that counter was not incremented + if pendingRequests.Load() != 0 { + t.Errorf("Expected pending requests to remain 0 for QueueProxy probe, got %d", pendingRequests.Load()) + } + }) + + t.Run("skips counter for Ingress probe requests", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set("User-Agent", "Knative-Ingress-Probe") + w := httptest.NewRecorder() + + wrappedHandler.ServeHTTP(w, req) + + // Check that counter was not incremented + if pendingRequests.Load() != 0 { + t.Errorf("Expected pending requests to remain 0 for Ingress probe, got %d", pendingRequests.Load()) + } + }) + t.Run("handles concurrent requests correctly", func(t *testing.T) { // Reset counter pendingRequests.Store(0)