diff --git a/go.mod b/go.mod index 6d839e7689ae..978db8042a27 100644 --- a/go.mod +++ b/go.mod @@ -28,12 +28,12 @@ require ( github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.34.2 github.com/opencontainers/go-digest v1.0.0 - github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f + github.com/openshift/api v0.0.0-20241109205306-a2817b89f7e0 github.com/openshift/apiserver-library-go v0.0.0-20241001175710-6064b62894a6 github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660 - github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f + github.com/openshift/client-go v0.0.0-20241107164952-923091dd2b1a github.com/openshift/cluster-network-operator v0.0.0-20240708200319-1cd8678b38fb - github.com/openshift/library-go v0.0.0-20241015130640-f9ecd211c68b + github.com/openshift/library-go v0.0.0-20241107160307-0064ad7bd060 github.com/pborman/uuid v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.74.0 @@ -52,10 +52,10 @@ require ( golang.org/x/crypto v0.27.0 golang.org/x/mod v0.20.0 golang.org/x/net v0.29.0 - golang.org/x/oauth2 v0.21.0 + golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.8.0 gonum.org/v1/plot v0.14.0 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.67.0 gopkg.in/ini.v1 v1.62.0 gopkg.in/src-d/go-git.v4 v4.13.1 gopkg.in/yaml.v2 v2.4.0 @@ -281,8 +281,8 @@ require ( golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.24.0 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -336,7 +336,7 @@ replace ( k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20241002124647-1892e4deb967 k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20241002124647-1892e4deb967 k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20241002124647-1892e4deb967 - k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20241002124647-1892e4deb967 + k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20241111135607-a3f31a4f2c58 k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20241002124647-1892e4deb967 k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20241002124647-1892e4deb967 k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20241002124647-1892e4deb967 diff --git a/go.sum b/go.sum index 56ba48e9cb28..69d7f94652e2 100644 --- a/go.sum +++ b/go.sum @@ -376,8 +376,8 @@ github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 
h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -670,18 +670,18 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 h1:R github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f h1:ya1OmyZm3LIIxI3U9VE9Nyx3ehCHgBwxyFUPflYPWls= -github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= +github.com/openshift/api v0.0.0-20241109205306-a2817b89f7e0 h1:l4VHxObu73EoMGtdgxNXo+MeYCjQyvN/PCvSfV/SkCU= +github.com/openshift/api v0.0.0-20241109205306-a2817b89f7e0/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= github.com/openshift/apiserver-library-go v0.0.0-20241001175710-6064b62894a6 h1:Wban+ggY6sbg611SQSOeavUeug2cRJGz0rEeXxTxIH0= github.com/openshift/apiserver-library-go v0.0.0-20241001175710-6064b62894a6/go.mod h1:9Anrq7+DZmmw1Brchx4zmh26hAZbe6Dv7bGXRclnhYI= github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660 h1:F0zE2bmdVvaEd18VXuGYQdJJ1FYJu4MIDW9PYZWc9No= github.com/openshift/build-machinery-go v0.0.0-20240613134303-8359781da660/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= -github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f h1:FRc0bVNWprihWS0GqQWzb3dY4dkCwpOP3mDw5NwSoR4= -github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f/go.mod h1:KiZi2mJRH1TOJ3FtBDYS6YvUL30s/iIXaGSUrSa36mo= +github.com/openshift/client-go v0.0.0-20241107164952-923091dd2b1a h1:h3F55x+zOXwSG4okUlXmOlU/5CQ7Ve2HKxYjseJPVoY= +github.com/openshift/client-go v0.0.0-20241107164952-923091dd2b1a/go.mod h1:JBIcn1JfD/JCpGYnRRT0+HLxrleF/Y7T3Y/t0p8o5jk= github.com/openshift/cluster-network-operator v0.0.0-20240708200319-1cd8678b38fb h1:Dr0dbSQTAU9UaoAvimGjR+fsvwx2twJ5KR0s/jyAz88= github.com/openshift/cluster-network-operator v0.0.0-20240708200319-1cd8678b38fb/go.mod h1:LnhqxbWhAnhPwilJ4yX1/ly7wCMCYJKkaiSJQSh+Wjg= -github.com/openshift/kubernetes v1.30.1-0.20241002124647-1892e4deb967 h1:2To0erLv3UdrTL76PLTenJQ4vkM+mRh/OGtBob6/Z6k= -github.com/openshift/kubernetes v1.30.1-0.20241002124647-1892e4deb967/go.mod h1:63roE91IAzMeCfirpMMyDvG+h4MIr2pXFhj5zZORTOY= +github.com/openshift/kubernetes v1.30.1-0.20241111135607-a3f31a4f2c58 h1:SoDQVMsHuqkOhOUSfHgsF+u7e3kL9YJykwtla0d2HLg= +github.com/openshift/kubernetes v1.30.1-0.20241111135607-a3f31a4f2c58/go.mod h1:i/tRYbbdCLJCAobxOdsAUjYoYmUVsx16LAVn4mPHZgY= github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20241002124647-1892e4deb967 h1:NZDgkOu/p8NcIaq3srJRc3qpHbviEEQnUVNXpYT+qG4= github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20241002124647-1892e4deb967/go.mod h1:Dm/iYMJ6/U1e9rvAbrngZcVY7I8bYUHkzPrToon6cTc= 
github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20241002124647-1892e4deb967 h1:1O/eXy147kDnitzj2OscVxt9HAGmQDpviY+cpkKKCU0= @@ -726,8 +726,8 @@ github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0 github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20241002124647-1892e4deb967/go.mod h1:cmVAeH6nq/BICRHjs7alZYCGXGFzZapRDIoFsXXJSsw= github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20241002124647-1892e4deb967 h1:85E8kHrVptr6SeoFHxU/wplZweb+likneYpI4jdCBb4= github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20241002124647-1892e4deb967/go.mod h1:mtBTrqylZ7Lv0pmB5XpbDK1QCiovEdaUNo/u/vaK2Lo= -github.com/openshift/library-go v0.0.0-20241015130640-f9ecd211c68b h1:NfHXfIpgWLoxhxOf9Ci/bm1xVp6T9RBgK1MfbVVzAvw= -github.com/openshift/library-go v0.0.0-20241015130640-f9ecd211c68b/go.mod h1:9B1MYPoLtP9tqjWxcbUNVpwxy68zOH/3EIP6c31dAM0= +github.com/openshift/library-go v0.0.0-20241107160307-0064ad7bd060 h1:jiDC7d8d+jmjv2WfiMY0+Uf55q11MGyYkGGqXnfqWTU= +github.com/openshift/library-go v0.0.0-20241107160307-0064ad7bd060/go.mod h1:9B1MYPoLtP9tqjWxcbUNVpwxy68zOH/3EIP6c31dAM0= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8 h1:HJvLw9nNfoqCs16h505eP8E1kVmq6KNdzGm5csPlYsQ= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -1091,8 +1091,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1355,10 +1355,10 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f h1:RARaIm8pxYuxyNPbBQf5igT7XdOyCNtat1qAT2ZxjU4= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1381,8 +1381,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/test/extended/olm/olmv1.go b/test/extended/olm/olmv1.go index 096f190dfa45..386fffa59c56 100644 --- a/test/extended/olm/olmv1.go +++ b/test/extended/olm/olmv1.go @@ -15,6 +15,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + configv1 "github.com/openshift/api/config/v1" + exutil "github.com/openshift/origin/test/extended/util" ) @@ -22,15 +24,13 @@ const ( olmv1GroupName = "olm.operatorframework.io" ) -var _ = g.Describe("[sig-olmv1] OLMv1 CRDs", func() { +var _ = g.Describe("[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 CRDs", func() { defer g.GinkgoRecover() oc := exutil.NewCLIWithoutNamespace("default") g.It("should be installed", func(ctx g.SpecContext) { // Check for tech preview, if this is not tech preview, bail - if !exutil.IsTechPreviewNoUpgrade(ctx, oc.AdminConfigClient()) { - g.Skip("Test only runs in tech-preview") - } + checkTestSkip(ctx, oc) // supports multiple versions during transision providedAPIs := []struct { @@ -68,15 +68,13 @@ var _ = g.Describe("[sig-olmv1] OLMv1 CRDs", func() { }) }) -var _ = g.Describe("[sig-olmv1] OLMv1 Catalogs", func() { +var _ = g.Describe("[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 Catalogs", func() { defer g.GinkgoRecover() oc := exutil.NewCLIWithoutNamespace("default") g.It("should be installed", func(ctx g.SpecContext) { // Check for tech preview, if this is not tech preview, bail - if !exutil.IsTechPreviewNoUpgrade(ctx, oc.AdminConfigClient()) { - g.Skip("Test only runs in tech-preview") - } + checkTestSkip(ctx, oc) providedCatalogs := []string{ "openshift-certified-operators", @@ -99,7 +97,7 @@ var _ = 
g.Describe("[sig-olmv1] OLMv1 Catalogs", func() { }) }) -var _ = g.Describe("[sig-olmv1] OLMv1 operator installation", func() { +var _ = g.Describe("[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 operator installation", func() { defer g.GinkgoRecover() var ( @@ -122,9 +120,7 @@ var _ = g.Describe("[sig-olmv1] OLMv1 operator installation", func() { g.It("should install a cluster extension", func(ctx g.SpecContext) { // Check for tech preview, if this is not tech preview, bail - if !exutil.IsTechPreviewNoUpgrade(ctx, oc.AdminConfigClient()) { - g.Skip("Test only runs in tech-preview") - } + checkTestSkip(ctx, oc) ns := oc.Namespace() g.By(fmt.Sprintf("Updating the namespace to: %q", ns)) @@ -163,3 +159,11 @@ var _ = g.Describe("[sig-olmv1] OLMv1 operator installation", func() { o.Expect(err).NotTo(o.HaveOccurred()) }) }) + +func checkTestSkip(ctx context.Context, oc *exutil.CLI) { + cap, err := exutil.IsCapabilityEnabled(oc, configv1.ClusterVersionCapabilityOperatorLifecycleManagerV1) + o.Expect(err).NotTo(o.HaveOccurred()) + if !cap { + g.Skip("Test only runs with OLMv1 capability") + } +} diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go index a7a4c6a72484..45a965e78e08 100644 --- a/test/extended/util/annotate/generated/zz_generated.annotations.go +++ b/test/extended/util/annotate/generated/zz_generated.annotations.go @@ -1703,11 +1703,11 @@ var Annotations = map[string]string{ "[sig-node][apigroup:config.openshift.io] CPU Partitioning node validation should have correct cpuset and cpushare set in crio containers": " [Suite:openshift/conformance/parallel]", - "[sig-olmv1] OLMv1 CRDs should be installed": " [Suite:openshift/conformance/parallel]", + "[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 CRDs should be installed": " [Suite:openshift/conformance/parallel]", - "[sig-olmv1] OLMv1 Catalogs should be installed": " [Suite:openshift/conformance/parallel]", + "[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 Catalogs should be installed": " [Suite:openshift/conformance/parallel]", - "[sig-olmv1] OLMv1 operator installation should install a cluster extension": " [Suite:openshift/conformance/parallel]", + "[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 operator installation should install a cluster extension": " [Suite:openshift/conformance/parallel]", "[sig-operator] OLM should Implement packages API server and list packagemanifest info with namespace not NULL [apigroup:packages.operators.coreos.com]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel]", diff --git a/test/extended/util/image/zz_generated.txt b/test/extended/util/image/zz_generated.txt index c0af550034b1..1c0f1bc04e52 100644 --- a/test/extended/util/image/zz_generated.txt +++ b/test/extended/util/image/zz_generated.txt @@ -6,7 +6,7 @@ quay.io/openshifttest/multicast:1.1 quay.io/openshift/community-e2e-images:e2e-q quay.io/redhat-developer/nfs-server:1.1 quay.io/openshift/community-e2e-images:e2e-quay-io-redhat-developer-nfs-server-1-1-dlXGfzrk5aNo8EjC quay.io/redhat-developer/test-build-roots2i:1.2 quay.io/openshift/community-e2e-images:e2e-quay-io-redhat-developer-test-build-roots2i-1-2-gLJ7WcnS2TSllJ32 quay.io/redhat-developer/test-build-simples2i:1.2 quay.io/openshift/community-e2e-images:e2e-quay-io-redhat-developer-test-build-simples2i-1-2-thirLMR-JKplfkmE -registry.k8s.io/build-image/distroless-iptables:v0.5.7 quay.io/openshift/community-e2e-images:e2e-10-registry-k8s-io-build-image-distroless-iptables-v0-5-7-dHqOZUmZe1rlSB3i 
+registry.k8s.io/build-image/distroless-iptables:v0.5.9 quay.io/openshift/community-e2e-images:e2e-10-registry-k8s-io-build-image-distroless-iptables-v0-5-9-TOjoKrGc37GDJceW registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2 quay.io/openshift/community-e2e-images:e2e-42-registry-k8s-io-cloud-provider-gcp-gcp-compute-persistent-disk-csi-driver-v1-2-2-fk3Ddr8np00iPF9c registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.4.0 quay.io/openshift/community-e2e-images:e2e-40-registry-k8s-io-cloud-provider-gcp-gcp-compute-persistent-disk-csi-driver-v1-4-0-mUHHjVVuv0UQiTyf registry.k8s.io/e2e-test-images/agnhost:2.52 quay.io/openshift/community-e2e-images:e2e-1-registry-k8s-io-e2e-test-images-agnhost-2-52-vo_U710PrYLetnfE diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 61386a72e496..24e0b2f09312 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -288,7 +288,7 @@ const ( ) // ClusterVersionCapability enumerates optional, core cluster components. -// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager +// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager;OperatorLifecycleManagerV1 type ClusterVersionCapability string const ( @@ -379,10 +379,14 @@ const ( // allows to distribute Docker images ClusterVersionCapabilityImageRegistry ClusterVersionCapability = "ImageRegistry" - // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager + // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager (legacy) // which itself manages the lifecycle of operators ClusterVersionCapabilityOperatorLifecycleManager ClusterVersionCapability = "OperatorLifecycleManager" + // ClusterVersionCapabilityOperatorLifecycleManagerV1 manages the Operator Lifecycle Manager (v1) + // which itself manages the lifecycle of operators + ClusterVersionCapabilityOperatorLifecycleManagerV1 ClusterVersionCapability = "OperatorLifecycleManagerV1" + // ClusterVersionCapabilityCloudCredential manages credentials for cloud providers // in openshift cluster ClusterVersionCapabilityCloudCredential ClusterVersionCapability = "CloudCredential" @@ -422,6 +426,7 @@ var KnownClusterVersionCapabilities = []ClusterVersionCapability{ ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, ClusterVersionCapabilityCloudCredential, ClusterVersionCapabilityIngress, ClusterVersionCapabilityCloudControllerManager, @@ -600,6 +605,7 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, ClusterVersionCapabilityCloudCredential, ClusterVersionCapabilityIngress, ClusterVersionCapabilityCloudControllerManager, @@ -618,6 +624,7 @@ var 
ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, ClusterVersionCapabilityCloudCredential, ClusterVersionCapabilityIngress, ClusterVersionCapabilityCloudControllerManager, diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 392d128c1117..43d27c4468e6 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -507,6 +507,20 @@ type AWSPlatformStatus struct { // +listType=atomic // +optional ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` + + // cloudLoadBalancerConfig holds configuration related to DNS and cloud + // load balancers. It allows configuration of in-cluster DNS as an alternative + // to the platform default DNS implementation. + // When using the ClusterHosted DNS type, Load Balancer IP addresses + // must be provided for the API and internal API load balancers as well as the + // ingress load balancer. + // + // +default={"dnsType": "PlatformDefault"} + // +kubebuilder:default={"dnsType": "PlatformDefault"} + // +openshift:enable:FeatureGate=AWSClusterHostedDNS + // +optional + // +nullable + CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` } // AWSResourceTag is a tag to apply to AWS resources created for the cluster. @@ -647,12 +661,12 @@ type GCPPlatformStatus struct { // Tombstone the field as a reminder. // ClusterHostedDNS ClusterHostedDNS `json:"clusterHostedDNS,omitempty"` - // cloudLoadBalancerConfig is a union that contains the IP addresses of API, - // API-Int and Ingress Load Balancers created on the cloud platform. These - // values would not be populated on on-prem platforms. These Load Balancer - // IPs are used to configure the in-cluster DNS instances for API, API-Int - // and Ingress services. `dnsType` is expected to be set to `ClusterHosted` - // when these Load Balancer IP addresses are populated and used. + // cloudLoadBalancerConfig holds configuration related to DNS and cloud + // load balancers. It allows configuration of in-cluster DNS as an alternative + // to the platform default DNS implementation. + // When using the ClusterHosted DNS type, Load Balancer IP addresses + // must be provided for the API and internal API load balancers as well as the + // ingress load balancer. 
// // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} @@ -1579,7 +1593,7 @@ type PowerVSServiceEndpoint struct { // Power Cloud - https://cloud.ibm.com/apidocs/power-cloud // // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$` + // +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;Power;ResourceController;ResourceManager;VPC Name string `json:"name"` // url is fully qualified URI with scheme https, that overrides the default generated diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index b3b1b62c4dfc..a50328c91f6e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -46,6 +46,25 @@ type NodeSpec struct { // the status and corresponding reaction of the cluster // +optional WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` + + // minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. + // Specifically, the apiserver will deny most authorization requests of kubelets that are older + // than the specified version, only allowing the kubelet to get and update its node object, and perform + // subjectaccessreviews. + // This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, + // and will eventually be marked as not ready. + // Its max length is 8, so maximum version allowed is either "9.999.99" or "99.99.99". + // Since the kubelet reports the version of the kubernetes release, not Openshift, this field references + // the underlying kubernetes version this version of Openshift is based off of. + // In other words: if an admin wishes to ensure no nodes run an older version than Openshift 4.17, then + // they should set the minimumKubeletVersion to 1.30.0. + // When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. + // Thus, a kubelet with version "1.0.0-ec.0" will be compatible with minimumKubeletVersion "1.0.0" or earlier. 
+ // +kubebuilder:validation:XValidation:rule="self == \"\" || self.matches('^[0-9]*.[0-9]*.[0-9]*$')",message="minimumKubeletVersion must be in a semver compatible format of x.y.z, or empty" + // +kubebuilder:validation:MaxLength:=8 + // +openshift:enable:FeatureGate=MinimumKubeletVersion + // +optional + MinimumKubeletVersion string `json:"minimumKubeletVersion"` } type NodeStatus struct { diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 069346998473..1b7fa44aad6d 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -245,6 +245,11 @@ func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) { *out = make([]AWSResourceTag, len(*in)) copy(*out, *in) } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index fa5dd4e31d3e..abfea5eaf0e3 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -310,6 +310,7 @@ infrastructures.config.openshift.io: Capability: "" Category: "" FeatureGates: + - AWSClusterHostedDNS - BareMetalLoadBalancer - GCPClusterHostedDNS - GCPLabelsTags @@ -382,7 +383,8 @@ nodes.config.openshift.io: CRDName: nodes.config.openshift.io Capability: "" Category: "" - FeatureGates: [] + FeatureGates: + - MinimumKubeletVersion FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index c580bd8342e4..145a7e4c041a 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -1184,10 +1184,11 @@ func (AWSPlatformSpec) SwaggerDoc() map[string]string { } var map_AWSPlatformStatus = map[string]string{ - "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", - "region": "region holds the default AWS region for new AWS resources created by the cluster.", - "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", - "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", + "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", + "region": "region holds the default AWS region for new AWS resources created by the cluster.", + "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services.
There must be only one ServiceEndpoint for a service.", + "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", } func (AWSPlatformStatus) SwaggerDoc() map[string]string { @@ -1389,7 +1390,7 @@ var map_GCPPlatformStatus = map[string]string{ "region": "region holds the region for new GCP resources created for the cluster.", "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", - "cloudLoadBalancerConfig": "cloudLoadBalancerConfig is a union that contains the IP addresses of API, API-Int and Ingress Load Balancers created on the cloud platform. These values would not be populated on on-prem platforms. These Load Balancer IPs are used to configure the in-cluster DNS instances for API, API-Int and Ingress services. `dnsType` is expected to be set to `ClusterHosted` when these Load Balancer IP addresses are populated and used.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", } func (GCPPlatformStatus) SwaggerDoc() map[string]string { @@ -2087,8 +2088,9 @@ func (NodeList) SwaggerDoc() map[string]string { } var map_NodeSpec = map[string]string{ - "cgroupMode": "CgroupMode determines the cgroups version on the node", - "workerLatencyProfile": "WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster", + "cgroupMode": "CgroupMode determines the cgroups version on the node", + "workerLatencyProfile": "WorkerLatencyProfile determines how fast the kubelet is updating the status and corresponding reaction of the cluster", + "minimumKubeletVersion": "minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. Specifically, the apiserver will deny most authorization requests of kubelets that are older than the specified version, only allowing the kubelet to get and update its node object, and perform subjectaccessreviews.
This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, and will eventually be marked as not ready. Its max length is 8, so maximum version allowed is either \"9.999.99\" or \"99.99.99\". Since the kubelet reports the version of the kubernetes release, not OpenShift, this field references the underlying kubernetes version this version of OpenShift is based off of. In other words: if an admin wishes to ensure no nodes run an older version than OpenShift 4.17, then they should set the minimumKubeletVersion to 1.30.0. When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. Thus, a kubelet with version \"1.0.0-ec.0\" will be compatible with minimumKubeletVersion \"1.0.0\" or earlier.", } func (NodeSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go index e3670f03e899..14650fd48f4e 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go @@ -41,6 +41,7 @@ type ClusterImagePolicySpec struct { // If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. // For additional details about the format, please refer to the document explaining the docker transport field, // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker // +kubebuilder:validation:Required diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go index 7031110ff1ea..a177ddb0d66b 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go @@ -40,6 +40,7 @@ type ImagePolicySpec struct { // If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. // In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories // quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied.
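The scope-precedence rule added above (an ImagePolicy scope that matches, or is nested under, a ClusterImagePolicy scope defers to the cluster-level policy) can be illustrated with a minimal Go sketch. scopeNestedUnder is a hypothetical helper, not an API from openshift/api, and real scope matching also handles tags, digests, and *.wildcard scopes, which this sketch ignores:

package main

import (
	"fmt"
	"strings"
)

// scopeNestedUnder reports whether scope is the same as, or nested under,
// parent. Repository and namespace scopes nest by path segment, e.g.
// "quay.io/ns/repo" is nested under "quay.io/ns".
func scopeNestedUnder(scope, parent string) bool {
	return scope == parent || strings.HasPrefix(scope, parent+"/")
}

func main() {
	clusterScopes := []string{"quay.io/openshift-release-dev"}
	imagePolicyScope := "quay.io/openshift-release-dev/ocp-release"
	for _, c := range clusterScopes {
		if scopeNestedUnder(imagePolicyScope, c) {
			fmt.Printf("ImagePolicy scope %q is ignored: covered by ClusterImagePolicy scope %q\n",
				imagePolicyScope, c)
		}
	}
}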
// For additional details about the format, please refer to the document explaining the docker transport field, // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker // +kubebuilder:validation:Required diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index 9da086efc56f..55468f38dac6 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -102,7 +102,7 @@ func (ClusterImagePolicyList) SwaggerDoc() map[string]string { var map_ClusterImagePolicySpec = map[string]string{ "": "CLusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.", - "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. 
In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", "policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", } @@ -151,7 +151,7 @@ func (ImagePolicyList) SwaggerDoc() map[string]string { var map_ImagePolicySpec = map[string]string{ "": "ImagePolicySpec is the specification of the ImagePolicy CRD.", - "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "scopes": "scopes defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. 
If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", "policy": "policy contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", } diff --git a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go index 24954687d530..534a41ca761a 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go @@ -43,11 +43,136 @@ type ConsolePluginSpec struct { Backend ConsolePluginBackend `json:"backend"` // proxy is a list of proxies that describe various service type // to which the plugin needs to connect to. + // +listType=atomic // +optional Proxy []ConsolePluginProxy `json:"proxy,omitempty"` // i18n is the configuration of plugin's localization resources. // +optional I18n ConsolePluginI18n `json:"i18n"` + // contentSecurityPolicy is a list of Content-Security-Policy (CSP) directives for the plugin. + // Each directive specifies a list of values, appropriate for the given directive type, + // for example a list of remote endpoints for fetch directives such as ScriptSrc. + // Console web application uses CSP to detect and mitigate certain types of attacks, + // such as cross-site scripting (XSS) and data injection attacks. + // Dynamic plugins should specify this field if they need to load assets from outside + // the cluster or if violation reports are observed. Dynamic plugins should always prefer + // loading their assets from within the cluster, either by vendoring them, or fetching + // from a cluster service. + // CSP violation reports can be viewed in the browser's console logs during development and + // testing of the plugin in the OpenShift web console. + // Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc and FontSrc. + // Each of the available directives may be defined only once in the list. + // The value 'self' is automatically included in all fetch directives by the OpenShift web + // console's backend. + // For more information about the CSP directives, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy + // + // The OpenShift web console server aggregates the CSP directives and values across + // its own default values and all enabled ConsolePlugin CRs, merging them into a single + // policy string that is sent to the browser via `Content-Security-Policy` HTTP response header.
+ // + // Example: + // ConsolePlugin A directives: + // script-src: https://script1.com/, https://script2.com/ + // font-src: https://font1.com/ + // + // ConsolePlugin B directives: + // script-src: https://script2.com/, https://script3.com/ + // font-src: https://font2.com/ + // img-src: https://img1.com/ + // + // Unified set of CSP directives, passed to the OpenShift web console server: + // script-src: https://script1.com/, https://script2.com/, https://script3.com/ + // font-src: https://font1.com/, https://font2.com/ + // img-src: https://img1.com/ + // + // OpenShift web console server CSP response header: + // Content-Security-Policy: default-src 'self'; base-uri 'self'; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/; font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; style-src 'self'; frame-src 'none'; object-src 'none' + // + // +openshift:enable:FeatureGate=ConsolePluginContentSecurityPolicy + // +kubebuilder:validation:MaxItems=5 + // +kubebuilder:validation:XValidation:rule="self.map(x, x.values.map(y, y.size()).sum()).sum() < 8192",message="the total combined size of values of all directives must not exceed 8192 (8kb)" + // +listType=map + // +listMapKey=directive + // +optional + ContentSecurityPolicy []ConsolePluginCSP `json:"contentSecurityPolicy"` +} + +// DirectiveType is an enumeration of OpenShift web console supported CSP directives. +// +kubebuilder:validation:Enum:="DefaultSrc";"ScriptSrc";"StyleSrc";"ImgSrc";"FontSrc" +// +enum +type DirectiveType string + +const ( + // DefaultSrc directive serves as a fallback for the other CSP fetch directives. + // For more information about the DefaultSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src + DefaultSrc DirectiveType = "DefaultSrc" + // ScriptSrc directive specifies valid sources for JavaScript. + // For more information about the ScriptSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src + ScriptSrc DirectiveType = "ScriptSrc" + // StyleSrc directive specifies valid sources for stylesheets. + // For more information about the StyleSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src + StyleSrc DirectiveType = "StyleSrc" + // ImgSrc directive specifies valid sources of images and favicons. + // For more information about the ImgSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src + ImgSrc DirectiveType = "ImgSrc" + // FontSrc directive specifies valid sources for fonts loaded using @font-face. + // For more information about the FontSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src + FontSrc DirectiveType = "FontSrc" +) + +// CSPDirectiveValue is a single value for a Content-Security-Policy directive. // Each directive value must have a maximum length of 1024 characters and must not contain // whitespace, commas (,), semicolons (;) or single quotes ('). The value '*' is not permitted.
+// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=1024 +// +kubebuilder:validation:XValidation:rule="!self.contains(\"'\")",message="CSP directive value cannot contain a quote" +// +kubebuilder:validation:XValidation:rule="!self.matches('\\\\s')",message="CSP directive value cannot contain a whitespace" +// +kubebuilder:validation:XValidation:rule="!self.contains(',')",message="CSP directive value cannot contain a comma" +// +kubebuilder:validation:XValidation:rule="!self.contains(';')",message="CSP directive value cannot contain a semi-colon" +// +kubebuilder:validation:XValidation:rule="self != '*'",message="CSP directive value cannot be a wildcard" +type CSPDirectiveValue string + +// ConsolePluginCSP holds configuration for a specific CSP directive +type ConsolePluginCSP struct { + // directive specifies which Content-Security-Policy directive to configure. + // Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc and FontSrc. + // DefaultSrc directive serves as a fallback for the other CSP fetch directives. + // For more information about the DefaultSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src + // ScriptSrc directive specifies valid sources for JavaScript. + // For more information about the ScriptSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src + // StyleSrc directive specifies valid sources for stylesheets. + // For more information about the StyleSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src + // ImgSrc directive specifies valid sources of images and favicons. + // For more information about the ImgSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src + // FontSrc directive specifies valid sources for fonts loaded using @font-face. + // For more information about the FontSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src + // +kubebuilder:validation:Required + Directive DirectiveType `json:"directive"` + // values defines an array of values to append to the console defaults for this directive. + // Each ConsolePlugin may define their own directives with their values. These will be set + // by the OpenShift web console's backend, as part of its Content-Security-Policy header. + // The array can contain at most 16 values. Each directive value must have a maximum length + // of 1024 characters and must not contain whitespace, commas (,), semicolons (;) or single + // quotes ('). The value '*' is not permitted. + // Each value in the array must be unique.
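The aggregation behavior documented above (union of each plugin's directive values, deduplicated, with 'self' included in every fetch directive) can be sketched in Go as follows. This is a minimal illustration of the documented merge under the example inputs from the godoc, not the console server's actual implementation, which also injects defaults such as default-src, base-uri, frame-src, and object-src and decides header ordering:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// mergeCSP unions directive values across plugins, dedupes them, and prepends
// 'self' to every directive, mirroring the documented behavior. Directives are
// sorted alphabetically here only for deterministic output.
func mergeCSP(plugins []map[string][]string) string {
	merged := map[string]map[string]bool{}
	for _, p := range plugins {
		for directive, values := range p {
			if merged[directive] == nil {
				merged[directive] = map[string]bool{}
			}
			for _, v := range values {
				merged[directive][v] = true
			}
		}
	}
	directives := make([]string, 0, len(merged))
	for d := range merged {
		directives = append(directives, d)
	}
	sort.Strings(directives)
	parts := make([]string, 0, len(directives))
	for _, d := range directives {
		values := make([]string, 0, len(merged[d]))
		for v := range merged[d] {
			values = append(values, v)
		}
		sort.Strings(values)
		parts = append(parts, d+" 'self' "+strings.Join(values, " "))
	}
	return strings.Join(parts, "; ")
}

func main() {
	pluginA := map[string][]string{
		"script-src": {"https://script1.com/", "https://script2.com/"},
		"font-src":   {"https://font1.com/"},
	}
	pluginB := map[string][]string{
		"script-src": {"https://script2.com/", "https://script3.com/"},
		"font-src":   {"https://font2.com/"},
		"img-src":    {"https://img1.com/"},
	}
	fmt.Println(mergeCSP([]map[string][]string{pluginA, pluginB}))
	// font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/
}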
+ // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))",message="each CSP directive value must be unique" + // +listType=atomic + Values []CSPDirectiveValue `json:"values"` } // LoadType is an enumeration of i18n loading types diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go index b7cd66da0cbb..d4fefaa37c25 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go @@ -416,6 +416,27 @@ func (in *ConsolePluginBackend) DeepCopy() *ConsolePluginBackend { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginCSP) DeepCopyInto(out *ConsolePluginCSP) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]CSPDirectiveValue, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginCSP. +func (in *ConsolePluginCSP) DeepCopy() *ConsolePluginCSP { + if in == nil { + return nil + } + out := new(ConsolePluginCSP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConsolePluginI18n) DeepCopyInto(out *ConsolePluginI18n) { *out = *in @@ -547,6 +568,13 @@ func (in *ConsolePluginSpec) DeepCopyInto(out *ConsolePluginSpec) { } } out.I18n = in.I18n + if in.ContentSecurityPolicy != nil { + in, out := &in.ContentSecurityPolicy, &out.ContentSecurityPolicy + *out = make([]ConsolePluginCSP, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml index 98abc7147b9f..250f873a0924 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml @@ -137,7 +137,8 @@ consoleplugins.console.openshift.io: CRDName: consoleplugins.console.openshift.io Capability: Console Category: "" - FeatureGates: [] + FeatureGates: + - ConsolePluginContentSecurityPolicy FilenameOperatorName: "" FilenameOperatorOrdering: "90" FilenameRunLevel: "" diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go index c6f2070fa4a1..685cb7ea276b 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go @@ -187,6 +187,16 @@ func (ConsolePluginBackend) SwaggerDoc() map[string]string { return map_ConsolePluginBackend } +var map_ConsolePluginCSP = map[string]string{ + "": "ConsolePluginCSP holds configuration for a specific CSP directive", + "directive": "directive specifies which Content-Security-Policy directive to configure. Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc and FontSrc. DefaultSrc directive serves as a fallback for the other CSP fetch directives. 
For more information about the DefaultSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src ScriptSrc directive specifies valid sources for JavaScript. For more information about the ScriptSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src StyleSrc directive specifies valid sources for stylesheets. For more information about the StyleSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src ImgSrc directive specifies valid sources of images and favicons. For more information about the ImgSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src FontSrc directive specifies valid sources for fonts loaded using @font-face. For more information about the FontSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src", + "values": "values defines an array of values to append to the console defaults for this directive. Each ConsolePlugin may define their own directives with their values. These will be set by the OpenShift web console's backend, as part of its Content-Security-Policy header. The array can contain at most 16 values. Each directive value must have a maximum length of 1024 characters and must not contain whitespace, commas (,), semicolons (;) or single quotes ('). The value '*' is not permitted. Each value in the array must be unique.", +} + +func (ConsolePluginCSP) SwaggerDoc() map[string]string { + return map_ConsolePluginCSP +} + var map_ConsolePluginI18n = map[string]string{ "": "ConsolePluginI18n holds information on localization resources that are served by the dynamic plugin.", "loadType": "loadType indicates how the plugin's localization resource should be loaded. Valid values are Preload, Lazy and the empty string. When set to Preload, all localization resources are fetched when the plugin is loaded. When set to Lazy, localization resources are lazily loaded as and when they are required by the console. When omitted or set to the empty string, the behaviour is equivalent to Lazy type.", @@ -251,11 +261,12 @@ func (ConsolePluginService) SwaggerDoc() map[string]string { } var map_ConsolePluginSpec = map[string]string{ - "": "ConsolePluginSpec is the desired plugin configuration.", - "displayName": "displayName is the display name of the plugin. The dispalyName should be between 1 and 128 characters.", - "backend": "backend holds the configuration of backend which is serving console's plugin .", - "proxy": "proxy is a list of proxies that describe various service type to which the plugin needs to connect to.", - "i18n": "i18n is the configuration of plugin's localization resources.", + "": "ConsolePluginSpec is the desired plugin configuration.", + "displayName": "displayName is the display name of the plugin. The displayName should be between 1 and 128 characters.", + "backend": "backend holds the configuration of backend which is serving console's plugin.", + "proxy": "proxy is a list of proxies that describe various service type to which the plugin needs to connect to.", + "i18n": "i18n is the configuration of plugin's localization resources.", + "contentSecurityPolicy": "contentSecurityPolicy is a list of Content-Security-Policy (CSP) directives for the plugin.
Each directive specifies a list of values, appropriate for the given directive type, for example a list of remote endpoints for fetch directives such as ScriptSrc. The console web application uses CSP to detect and mitigate certain types of attacks, such as cross-site scripting (XSS) and data injection attacks. Dynamic plugins should specify this field if they need to load assets from outside the cluster or if violation reports are observed. Dynamic plugins should always prefer loading their assets from within the cluster, either by vendoring them, or fetching from a cluster service. CSP violation reports can be viewed in the browser's console logs during development and testing of the plugin in the OpenShift web console. Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc and FontSrc. Each of the available directives may be defined only once in the list. The value 'self' is automatically included in all fetch directives by the OpenShift web console's backend. For more information about the CSP directives, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy\n\nThe OpenShift web console server aggregates the CSP directives and values across its own default values and all enabled ConsolePlugin CRs, merging them into a single policy string that is sent to the browser via `Content-Security-Policy` HTTP response header.\n\nExample:\n  ConsolePlugin A directives:\n    script-src: https://script1.com/, https://script2.com/\n    font-src: https://font1.com/\n\n  ConsolePlugin B directives:\n    script-src: https://script2.com/, https://script3.com/\n    font-src: https://font2.com/\n    img-src: https://img1.com/\n\n  Unified set of CSP directives, passed to the OpenShift web console server:\n    script-src: https://script1.com/, https://script2.com/, https://script3.com/\n    font-src: https://font1.com/, https://font2.com/\n    img-src: https://img1.com/\n\n  OpenShift web console server CSP response header:\n    Content-Security-Policy: default-src 'self'; base-uri 'self'; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/; font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; style-src 'self'; frame-src 'none'; object-src 'none'", } func (ConsolePluginSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/envtest-releases.yaml b/vendor/github.com/openshift/api/envtest-releases.yaml index fa789b18cea2..5651bbcc9d4c 100644 --- a/vendor/github.com/openshift/api/envtest-releases.yaml +++ b/vendor/github.com/openshift/api/envtest-releases.yaml @@ -12,3 +12,16 @@ releases: envtest-v1.30.3-linux-arm64.tar.gz: hash: deb395d5e9578a58786c42b4e7d878b4aef984ac2dce510031fbecf12092162a4aee1cde774f1527cfae90f6885382dc7b3d79ec379b7f4160c3a35fad7cbc3b selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.30.3-linux-arm64.tar.gz + v1.31.1: + envtest-v1.31.1-darwin-amd64.tar.gz: + hash: c884c6a9751f12f57ede0dc3d8dfffdb0f60f7111d6d01ca0693b66d663dfbd37c21ab6a9e571d1a6f649ed7db54b04b069ab0aff6366b2db2f5d3d8ba84a296 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.1-darwin-amd64.tar.gz + envtest-v1.31.1-darwin-arm64.tar.gz: + hash: c760be21c579a516cad8fbafd0f202229f5e074da1869958b84ae8dca295ffb33eb6fd4fd0b66349c31c4adff1561e7dd188137885e3661e34c0a14e12ada20e + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.1-darwin-arm64.tar.gz + envtest-v1.31.1-linux-amd64.tar.gz: + hash: 
a683fad736249b681d50c40715068ecb64f3ef22a85f29387eb61435c36dfe0cebf0bc7e109e237071cd856bc0e37d79a732309fd8d0b16fba6e019cf5c6e8b6 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.1-linux-amd64.tar.gz + envtest-v1.31.1-linux-arm64.tar.gz: + hash: 86fa42c6a3d92e438e35d6066587d0e4f36b910885e10520868959ece2fe740d99abc735f69d6ebe8920291f70d3819b169ad5ddd2db805f8f56a3b83eee3893 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.1-linux-arm64.tar.gz diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index e668fc88b366..e5769fbce938 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -7,26 +7,29 @@ | MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | | MultiArchInstallAzure| | | | | | | | GatewayAPI| | | Enabled | Enabled | | | +| AWSClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | | AdditionalRoutingCapabilities| | | Enabled | Enabled | Enabled | Enabled | | AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled | | BootcNodeManagement| | | Enabled | Enabled | Enabled | Enabled | | CSIDriverSharedResource| | | Enabled | Enabled | Enabled | Enabled | | ClusterMonitoringConfig| | | Enabled | Enabled | Enabled | Enabled | +| ConsolePluginContentSecurityPolicy| | | Enabled | Enabled | Enabled | Enabled | | DNSNameResolver| | | Enabled | Enabled | Enabled | Enabled | | DynamicResourceAllocation| | | Enabled | Enabled | Enabled | Enabled | | EtcdBackendQuota| | | Enabled | Enabled | Enabled | Enabled | | Example| | | Enabled | Enabled | Enabled | Enabled | | GCPClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | | ImageStreamImportMode| | | Enabled | Enabled | Enabled | Enabled | +| IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfig| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfigAPI| | | Enabled | Enabled | Enabled | Enabled | | InsightsOnDemandDataGather| | | Enabled | Enabled | Enabled | Enabled | | InsightsRuntimeExtractor| | | Enabled | Enabled | Enabled | Enabled | | MachineAPIProviderOpenStack| | | Enabled | Enabled | Enabled | Enabled | | MachineConfigNodes| | | Enabled | Enabled | Enabled | Enabled | -| ManagedBootImagesAWS| | | Enabled | Enabled | Enabled | Enabled | | MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled | | MetricsCollectionProfiles| | | Enabled | Enabled | Enabled | Enabled | +| MinimumKubeletVersion| | | Enabled | Enabled | Enabled | Enabled | | MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled | | NetworkSegmentation| | | Enabled | Enabled | Enabled | Enabled | | NewOLM| | | Enabled | Enabled | Enabled | Enabled | @@ -48,6 +51,7 @@ | UserNamespacesSupport| | | Enabled | Enabled | Enabled | Enabled | | VSphereMultiNetworks| | | Enabled | Enabled | Enabled | Enabled | | VSphereMultiVCenters| | | Enabled | Enabled | Enabled | Enabled | +| VolumeAttributesClass| | | Enabled | Enabled | Enabled | Enabled | | VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled | | ExternalOIDC| Enabled | | Enabled | Enabled | Enabled | Enabled | | AWSEFSDriverVolumeMetrics| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | @@ -64,7 +68,7 @@ | IngressControllerLBSubnetsAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImages| Enabled | Enabled | Enabled | 
Enabled | Enabled | Enabled | -| MetricsServer| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | MultiArchInstallAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | MultiArchInstallGCP| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go index 7f4962395432..f127d8ffb96c 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -36,10 +36,19 @@ func AllFeatureSets() map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGat var ( allFeatureGates = map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGateEnabledDisabled{} + FeatureGateConsolePluginCSP = newFeatureGate("ConsolePluginContentSecurityPolicy"). + reportProblemsToJiraComponent("Management Console"). + contactPerson("jhadvig"). + productScope(ocpSpecific). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1706"). + mustRegister() + FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding"). reportProblemsToJiraComponent("apiserver-auth"). contactPerson("stlaz"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4193"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -47,6 +56,7 @@ var ( reportProblemsToJiraComponent("kube-apiserver"). contactPerson("benluddy"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3488"). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -54,6 +64,7 @@ var ( reportProblemsToJiraComponent("Routing"). contactPerson("miciah"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade). mustRegister() @@ -61,6 +72,7 @@ var ( reportProblemsToJiraComponent("Networking / router"). contactPerson("miheer"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -68,6 +80,7 @@ var ( reportProblemsToJiraComponent("auth"). contactPerson("ibihim"). productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/899"). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -75,6 +88,7 @@ var ( reportProblemsToJiraComponent("builds"). contactPerson("adkaplan"). productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1056"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -82,6 +96,7 @@ var ( reportProblemsToJiraComponent("builds"). contactPerson("adkaplan"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -89,6 +104,7 @@ var ( reportProblemsToJiraComponent("node"). contactPerson("ehashman"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/2400"). 
enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -96,6 +112,7 @@ var ( reportProblemsToJiraComponent("openstack"). contactPerson("egarcia"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -103,6 +120,7 @@ var ( reportProblemsToJiraComponent("insights"). contactPerson("tremes"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -110,6 +128,7 @@ var ( reportProblemsToJiraComponent("insights"). contactPerson("jmesnil"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -117,6 +136,7 @@ var ( reportProblemsToJiraComponent("scheduling"). contactPerson("jchaloup"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4381"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -124,6 +144,7 @@ var ( reportProblemsToJiraComponent("cloud-credential-operator"). contactPerson("abutcher"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -131,6 +152,7 @@ var ( reportProblemsToJiraComponent("apps"). contactPerson("atiratree"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/961"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -138,12 +160,14 @@ var ( reportProblemsToJiraComponent("node"). contactPerson("sairameshv"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3386"). mustRegister() FeatureGatePrivateHostedZoneAWS = newFeatureGate("PrivateHostedZoneAWS"). reportProblemsToJiraComponent("Routing"). contactPerson("miciah"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -151,6 +175,7 @@ var ( reportProblemsToJiraComponent("node"). contactPerson("sgrunert"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -158,6 +183,7 @@ var ( reportProblemsToJiraComponent("Installer"). contactPerson("bhb"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -165,6 +191,7 @@ var ( reportProblemsToJiraComponent("cloud-provider"). contactPerson("jspeed"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -172,6 +199,7 @@ var ( reportProblemsToJiraComponent("machine-config-operator/platform-baremetal"). contactPerson("mkowalsk"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3705"). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -179,6 +207,7 @@ var ( reportProblemsToJiraComponent("splat"). contactPerson("vr4manta"). productScope(ocpSpecific). 
+ enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -186,6 +215,7 @@ var ( reportProblemsToJiraComponent("splat"). contactPerson("rvanderp3"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -193,6 +223,7 @@ var ( reportProblemsToJiraComponent("router"). contactPerson("thejasn"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -200,6 +231,7 @@ var ( reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("tssurya"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -207,6 +239,7 @@ var ( reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("tssurya"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -214,6 +247,7 @@ var ( reportProblemsToJiraComponent("Networking/cluster-network-operator"). contactPerson("jcaamano"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -221,6 +255,7 @@ var ( reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("jcaamano"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -228,6 +263,7 @@ var ( reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("pliu"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -235,6 +271,7 @@ var ( reportProblemsToJiraComponent("Networking/cluster-network-operator"). contactPerson("kyrtapz"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -242,6 +279,7 @@ var ( reportProblemsToJiraComponent("Networking"). contactPerson("npinaeva"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -249,6 +287,7 @@ var ( reportProblemsToJiraComponent("etcd"). contactPerson("hasbro17"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -256,6 +295,7 @@ var ( reportProblemsToJiraComponent("etcd"). contactPerson("hasbro17"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -263,6 +303,7 @@ var ( reportProblemsToJiraComponent("etcd"). contactPerson("hasbro17"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
mustRegister() @@ -270,12 +311,14 @@ var ( reportProblemsToJiraComponent("ecoproject"). contactPerson("msluiter"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). mustRegister() FeatureGateDNSNameResolver = newFeatureGate("DNSNameResolver"). reportProblemsToJiraComponent("dns"). contactPerson("miciah"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -283,6 +326,7 @@ var ( reportProblemsToJiraComponent("splat"). contactPerson("rvanderp3"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -290,6 +334,7 @@ var ( reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("cdoern"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -297,19 +342,22 @@ var ( reportProblemsToJiraComponent("Installer"). contactPerson("vincepri"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). mustRegister() - FeatureGateMetricsServer = newFeatureGate("MetricsServer"). - reportProblemsToJiraComponent("Monitoring"). - contactPerson("slashpai"). + FeatureGateGCPClusterHostedDNS = newFeatureGate("GCPClusterHostedDNS"). + reportProblemsToJiraComponent("Installer"). + contactPerson("barbacbd"). productScope(ocpSpecific). - enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateGCPClusterHostedDNS = newFeatureGate("GCPClusterHostedDNS"). + FeatureGateAWSClusterHostedDNS = newFeatureGate("AWSClusterHostedDNS"). reportProblemsToJiraComponent("Installer"). contactPerson("barbacbd"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -317,6 +365,7 @@ var ( reportProblemsToJiraComponent("NodeTuningOperator"). contactPerson("titzhak"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -324,6 +373,7 @@ var ( reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("djoshy"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -331,13 +381,15 @@ var ( reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("djoshy"). productScope(ocpSpecific). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateDisableKubeletCloudCredentialProviders = newFeatureGate("DisableKubeletCloudCredentialProviders"). reportProblemsToJiraComponent("cloud-provider"). contactPerson("jspeed"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/2395"). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
mustRegister() @@ -345,6 +397,7 @@ var ( reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("dkhater"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -352,6 +405,7 @@ var ( reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("inesqyx"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -359,6 +413,7 @@ var ( reportProblemsToJiraComponent("Cluster Version Operator"). contactPerson("lmohanty"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -366,6 +421,7 @@ var ( reportProblemsToJiraComponent("kube-apiserver"). contactPerson("dgrisonnet"). productScope(kubernetes). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -373,6 +429,7 @@ var ( reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("jhernand"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -380,6 +437,7 @@ var ( reportProblemsToJiraComponent("Cluster Version Operator"). contactPerson("pmuller"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -387,13 +445,23 @@ var ( reportProblemsToJiraComponent("kube-apiserver"). contactPerson("akashem"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4006"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateVolumeAttributesClass = newFeatureGate("VolumeAttributesClass"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("dfajmon"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3751"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateVolumeGroupSnapshot = newFeatureGate("VolumeGroupSnapshot"). reportProblemsToJiraComponent("Storage / Kubernetes External Components"). contactPerson("fbertina"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/3476"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -401,6 +469,7 @@ var ( reportProblemsToJiraComponent("authentication"). contactPerson("liouk"). productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1596"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). enableForClusterProfile(Hypershift, configv1.Default, configv1.TechPreviewNoUpgrade). mustRegister() @@ -409,6 +478,7 @@ var ( reportProblemsToJiraComponent("cluster-config"). contactPerson("deads"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -416,6 +486,7 @@ var ( reportProblemsToJiraComponent("olm"). contactPerson("joe"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). 
enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -423,6 +494,7 @@ var ( reportProblemsToJiraComponent("olm"). contactPerson("joe"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -430,6 +502,7 @@ var ( reportProblemsToJiraComponent("insights"). contactPerson("tremes"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -437,6 +510,7 @@ var ( reportProblemsToJiraComponent("metal"). contactPerson("EmilienM"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -444,6 +518,7 @@ var ( reportProblemsToJiraComponent("insights"). contactPerson("tremes"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -451,6 +526,7 @@ var ( reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("jerzhang"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -458,6 +534,7 @@ var ( reportProblemsToJiraComponent("Monitoring"). contactPerson("rexagod"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -465,6 +542,7 @@ var ( reportProblemsToJiraComponent("Storage / Kubernetes External Components"). contactPerson("rbednar"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -472,12 +550,14 @@ var ( reportProblemsToJiraComponent("Installer"). contactPerson("cjschaef"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). mustRegister() FeatureGateChunkSizeMiB = newFeatureGate("ChunkSizeMiB"). reportProblemsToJiraComponent("Image Registry"). contactPerson("flavianmissi"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -485,12 +565,14 @@ var ( reportProblemsToJiraComponent("OCPCLOUD"). contactPerson("jspeed"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). mustRegister() FeatureGatePersistentIPsForVirtualization = newFeatureGate("PersistentIPsForVirtualization"). reportProblemsToJiraComponent("CNV Network"). contactPerson("mduarted"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -498,6 +580,7 @@ var ( reportProblemsToJiraComponent("Monitoring"). contactPerson("marioferh"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -505,6 +588,7 @@ var ( reportProblemsToJiraComponent("Installer"). contactPerson("r4f4"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). 
enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -512,12 +596,14 @@ var ( reportProblemsToJiraComponent("Installer"). contactPerson("r4f4"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). mustRegister() FeatureGateMultiArchInstallGCP = newFeatureGate("MultiArchInstallGCP"). reportProblemsToJiraComponent("Installer"). contactPerson("r4f4"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -525,6 +611,7 @@ var ( reportProblemsToJiraComponent("Routing"). contactPerson("miciah"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -532,6 +619,7 @@ var ( reportProblemsToJiraComponent("Storage / Kubernetes External Components"). contactPerson("fbertina"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -539,6 +627,7 @@ var ( reportProblemsToJiraComponent("Multi-Arch"). contactPerson("psundara"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -546,6 +635,7 @@ var ( reportProblemsToJiraComponent("Node"). contactPerson("haircommander"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -553,6 +643,7 @@ var ( reportProblemsToJiraComponent("Node"). contactPerson("haircommander"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -560,6 +651,7 @@ var ( reportProblemsToJiraComponent("Node"). contactPerson("haircommander"). productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4265"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() @@ -567,6 +659,23 @@ var ( reportProblemsToJiraComponent("SPLAT"). contactPerson("rvanderp"). productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + + FeatureGateIngressControllerDynamicConfigurationManager = newFeatureGate("IngressControllerDynamicConfigurationManager"). + reportProblemsToJiraComponent("Networking/router"). + contactPerson("miciah"). + productScope(ocpSpecific). + enhancementPR(legacyFeatureGateWithoutEnhancement). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateMinimumKubeletVersion = newFeatureGate("MinimumKubeletVersion"). + reportProblemsToJiraComponent("Node"). + contactPerson("haircommander"). + productScope(ocpSpecific). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1697"). 
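Every registration in this file now carries an enhancementPR, and register() (shown later in features/util.go in this diff) rejects gates whose link does not point at an openshift/enhancements pull or kubernetes/enhancements issue. A hedged, self-contained sketch of the same fluent pattern (simplified types and names, not the vendored implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// gateBuilder is a simplified stand-in for the vendored featureGateBuilder.
type gateBuilder struct {
	name, contact, enhancementPR string
}

func newGate(name string) *gateBuilder { return &gateBuilder{name: name} }

func (b *gateBuilder) contactPerson(p string) *gateBuilder { b.contact = p; return b }

func (b *gateBuilder) withEnhancementPR(url string) *gateBuilder { b.enhancementPR = url; return b }

// mustRegister mirrors the new rule: a gate must carry an enhancement link
// in one of the two accepted locations, or registration panics.
func (b *gateBuilder) mustRegister() string {
	ok := strings.HasPrefix(b.enhancementPR, "https://github.com/openshift/enhancements/pull/") ||
		strings.HasPrefix(b.enhancementPR, "https://github.com/kubernetes/enhancements/issues/")
	if !ok {
		panic(fmt.Sprintf("FeatureGate/%s needs an enhancement PR or issue link", b.name))
	}
	return b.name
}

func main() {
	// Hypothetical gate; only the URL prefix format matches what register() accepts.
	name := newGate("ExampleGate").
		contactPerson("someone").
		withEnhancementPR("https://github.com/openshift/enhancements/pull/1234").
		mustRegister()
	fmt.Println("registered:", name)
}
```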
+ mustRegister() ) diff --git a/vendor/github.com/openshift/api/features/legacyfeaturegates.go b/vendor/github.com/openshift/api/features/legacyfeaturegates.go new file mode 100644 index 000000000000..06e1600e0d91 --- /dev/null +++ b/vendor/github.com/openshift/api/features/legacyfeaturegates.go @@ -0,0 +1,129 @@ +package features + +import "k8s.io/apimachinery/pkg/util/sets" + +var legacyFeatureGates = sets.New( + "AWSClusterHostedDNS", + // never add to this list, if you think you have an exception ask @deads2k + "AWSEFSDriverVolumeMetrics", + // never add to this list, if you think you have an exception ask @deads2k + "AdditionalRoutingCapabilities", + // never add to this list, if you think you have an exception ask @deads2k + "AdminNetworkPolicy", + // never add to this list, if you think you have an exception ask @deads2k + "AlibabaPlatform", + // never add to this list, if you think you have an exception ask @deads2k + "AutomatedEtcdBackup", + // never add to this list, if you think you have an exception ask @deads2k + "AzureWorkloadIdentity", + // never add to this list, if you think you have an exception ask @deads2k + "BareMetalLoadBalancer", + // never add to this list, if you think you have an exception ask @deads2k + "BootcNodeManagement", + // never add to this list, if you think you have an exception ask @deads2k + "BuildCSIVolumes", + // never add to this list, if you think you have an exception ask @deads2k + "ChunkSizeMiB", + // never add to this list, if you think you have an exception ask @deads2k + "ClusterAPIInstall", + // never add to this list, if you think you have an exception ask @deads2k + "ClusterAPIInstallIBMCloud", + // never add to this list, if you think you have an exception ask @deads2k + "ClusterMonitoringConfig", + // never add to this list, if you think you have an exception ask @deads2k + "DNSNameResolver", + // never add to this list, if you think you have an exception ask @deads2k + "EtcdBackendQuota", + // never add to this list, if you think you have an exception ask @deads2k + "Example", + // never add to this list, if you think you have an exception ask @deads2k + "GCPClusterHostedDNS", + // never add to this list, if you think you have an exception ask @deads2k + "GCPLabelsTags", + // never add to this list, if you think you have an exception ask @deads2k + "GatewayAPI", + // never add to this list, if you think you have an exception ask @deads2k + "HardwareSpeed", + // never add to this list, if you think you have an exception ask @deads2k + "ImageStreamImportMode", + // never add to this list, if you think you have an exception ask @deads2k + "IngressControllerDynamicConfigurationManager", + // never add to this list, if you think you have an exception ask @deads2k + "IngressControllerLBSubnetsAWS", + // never add to this list, if you think you have an exception ask @deads2k + "InsightsConfig", + // never add to this list, if you think you have an exception ask @deads2k + "InsightsConfigAPI", + // never add to this list, if you think you have an exception ask @deads2k + "InsightsOnDemandDataGather", + // never add to this list, if you think you have an exception ask @deads2k + "InsightsRuntimeExtractor", + // never add to this list, if you think you have an exception ask @deads2k + "KMSv1", + // never add to this list, if you think you have an exception ask @deads2k + "MachineAPIMigration", + // never add to this list, if you think you have an exception ask @deads2k + "MachineAPIOperatorDisableMachineHealthCheckController", + // never add to 
this list, if you think you have an exception ask @deads2k + "MachineAPIProviderOpenStack", + // never add to this list, if you think you have an exception ask @deads2k + "MachineConfigNodes", + // never add to this list, if you think you have an exception ask @deads2k + "ManagedBootImages", + // never add to this list, if you think you have an exception ask @deads2k + "ManagedBootImagesAWS", + // never add to this list, if you think you have an exception ask @deads2k + "MetricsCollectionProfiles", + // never add to this list, if you think you have an exception ask @deads2k + "MixedCPUsAllocation", + // never add to this list, if you think you have an exception ask @deads2k + "MultiArchInstallAWS", + // never add to this list, if you think you have an exception ask @deads2k + "MultiArchInstallAzure", + // never add to this list, if you think you have an exception ask @deads2k + "MultiArchInstallGCP", + // never add to this list, if you think you have an exception ask @deads2k + "NetworkDiagnosticsConfig", + // never add to this list, if you think you have an exception ask @deads2k + "NetworkLiveMigration", + // never add to this list, if you think you have an exception ask @deads2k + "NetworkSegmentation", + // never add to this list, if you think you have an exception ask @deads2k + "NewOLM", + // never add to this list, if you think you have an exception ask @deads2k + "NodeDisruptionPolicy", + // never add to this list, if you think you have an exception ask @deads2k + "OVNObservability", + // never add to this list, if you think you have an exception ask @deads2k + "OnClusterBuild", + // never add to this list, if you think you have an exception ask @deads2k + "PersistentIPsForVirtualization", + // never add to this list, if you think you have an exception ask @deads2k + "PinnedImages", + // never add to this list, if you think you have an exception ask @deads2k + "PlatformOperators", + // never add to this list, if you think you have an exception ask @deads2k + "PrivateHostedZoneAWS", + // never add to this list, if you think you have an exception ask @deads2k + "RouteAdvertisements", + // never add to this list, if you think you have an exception ask @deads2k + "RouteExternalCertificate", + // never add to this list, if you think you have an exception ask @deads2k + "SetEIPForNLBIngressController", + // never add to this list, if you think you have an exception ask @deads2k + "SignatureStores", + // never add to this list, if you think you have an exception ask @deads2k + "SigstoreImageVerification", + // never add to this list, if you think you have an exception ask @deads2k + "UpgradeStatus", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereControlPlaneMachineSet", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereDriverConfiguration", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereMultiNetworks", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereMultiVCenters", + // never add to this list, if you think you have an exception ask @deads2k + "VSphereStaticIPs", +) diff --git a/vendor/github.com/openshift/api/features/util.go b/vendor/github.com/openshift/api/features/util.go index d8d8e94a0e9e..59bb7bff4078 100644 --- a/vendor/github.com/openshift/api/features/util.go +++ b/vendor/github.com/openshift/api/features/util.go @@ -3,6 +3,8 @@ package features import ( "fmt" configv1 "github.com/openshift/api/config/v1" + "net/url" + 
"strings" ) // FeatureGateDescription is a golang-only interface used to contains details for a feature gate. @@ -18,6 +20,8 @@ type FeatureGateDescription struct { ResponsiblePerson string // OwningProduct is the product that owns the lifecycle of the gate. OwningProduct OwningProduct + // EnhancementPR is the PR for the enhancement. + EnhancementPR string } type FeatureGateEnabledDisabled struct { @@ -45,10 +49,15 @@ type featureGateBuilder struct { owningJiraComponent string responsiblePerson string owningProduct OwningProduct + enhancementPRURL string statusByClusterProfileByFeatureSet map[ClusterProfileName]map[configv1.FeatureSet]bool } +const ( + legacyFeatureGateWithoutEnhancement = "FeatureGate predates 4.18" +) + // newFeatureGate featuregate are disabled in every FeatureSet and selectively enabled func newFeatureGate(name string) *featureGateBuilder { b := &featureGateBuilder{ @@ -80,6 +89,11 @@ func (b *featureGateBuilder) productScope(owningProduct OwningProduct) *featureG return b } +func (b *featureGateBuilder) enhancementPR(url string) *featureGateBuilder { + b.enhancementPRURL = url + return b +} + func (b *featureGateBuilder) enableIn(featureSets ...configv1.FeatureSet) *featureGateBuilder { for clusterProfile := range b.statusByClusterProfileByFeatureSet { for _, featureSet := range featureSets { @@ -109,6 +123,22 @@ func (b *featureGateBuilder) register() (configv1.FeatureGateName, error) { if len(b.owningProduct) == 0 { return "", fmt.Errorf("missing owningProduct") } + _, enhancementPRErr := url.Parse(b.enhancementPRURL) + switch { + case b.enhancementPRURL == legacyFeatureGateWithoutEnhancement: + if !legacyFeatureGates.Has(b.name) { + return "", fmt.Errorf("FeatureGate/%s is a new feature gate, not an existing one. It must have an enhancementPR with GA Graduation Criteria like https://github.com/openshift/enhancements/pull/#### or https://github.com/kubernetes/enhancements/issues/####", b.name) + } + + case len(b.enhancementPRURL) == 0: + return "", fmt.Errorf("FeatureGate/%s is missing an enhancementPR with GA Graduation Criteria like https://github.com/openshift/enhancements/pull/#### or https://github.com/kubernetes/enhancements/issues/####", b.name) + + case !strings.HasPrefix(b.enhancementPRURL, "https://github.com/openshift/enhancements/pull/") && !strings.HasPrefix(b.enhancementPRURL, "https://github.com/kubernetes/enhancements/issues/"): + return "", fmt.Errorf("FeatureGate/%s enhancementPR format is incorrect; must be like https://github.com/openshift/enhancements/pull/#### or https://github.com/kubernetes/enhancements/issues/####", b.name) + + case enhancementPRErr != nil: + return "", fmt.Errorf("FeatureGate/%s is enhancementPR is invalid: %w", b.name, enhancementPRErr) + } featureGateName := configv1.FeatureGateName(b.name) description := FeatureGateDescription{ @@ -118,6 +148,7 @@ func (b *featureGateBuilder) register() (configv1.FeatureGateName, error) { OwningJiraComponent: b.owningJiraComponent, ResponsiblePerson: b.responsiblePerson, OwningProduct: b.owningProduct, + EnhancementPR: b.enhancementPRURL, } // statusByClusterProfileByFeatureSet is initialized by constructor to be false for every combination diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go index b9cdcc213b84..6d29f42e3fcd 100644 --- a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go @@ -62,6 +62,25 @@ type 
KubeAPIServerConfig struct { // TODO this needs to be removed. APIServerArguments map[string]Arguments `json:"apiServerArguments"` + + // minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. + // Specifically, the apiserver will deny most authorization requests of kubelets that are older + // than the specified version, only allowing the kubelet to get and update its node object, and perform + // subjectaccessreviews. + // This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, + // and will eventually be marked as not ready. + // Its max length is 8, so maximum version allowed is either "9.999.99" or "99.99.99". + // Since the kubelet reports the version of the kubernetes release, not OpenShift, this field references + // the underlying kubernetes version this version of OpenShift is based off of. + // In other words: if an admin wishes to ensure no nodes run an older version than OpenShift 4.17, then + // they should set the minimumKubeletVersion to 1.30.0. + // When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. + // Thus, a kubelet with version "1.0.0-ec.0" will be compatible with minimumKubeletVersion "1.0.0" or earlier. + // +kubebuilder:validation:XValidation:rule="self == \"\" || self.matches('^[0-9]*.[0-9]*.[0-9]*$')",message="minimumKubeletVersion must be in a semver compatible format of x.y.z, or empty" + // +kubebuilder:validation:MaxLength:=8 + // +openshift:enable:FeatureGate=MinimumKubeletVersion + // +optional + MinimumKubeletVersion string `json:"minimumKubeletVersion"` } // Arguments masks the value so protobuf can generate diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go index 906bb271b00b..5ecdd0583926 100644 --- a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -33,6 +33,7 @@ var map_KubeAPIServerConfig = map[string]string{ "projectConfig": "projectConfig feeds an admission plugin", "serviceAccountPublicKeyFiles": "serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", "oauthConfig": "oauthConfig, if present start the /oauth endpoint in this process", + "minimumKubeletVersion": "minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. Specifically, the apiserver will deny most authorization requests of kubelets that are older than the specified version, only allowing the kubelet to get and update its node object, and perform subjectaccessreviews. This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, and will eventually be marked as not ready. Its max length is 8, so maximum version allowed is either \"9.999.99\" or \"99.99.99\". Since the kubelet reports the version of the kubernetes release, not OpenShift, this field references the underlying kubernetes version this version of OpenShift is based off of. 
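The comparison rule described above (anything outside major.minor.patch is stripped, then the three components are compared numerically) can be sketched like this; the helpers are illustrative, not the apiserver's actual enforcement code:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseKubeletVersion keeps only the major.minor.patch prefix, so
// "1.30.0-ec.0" compares equal to "1.30.0", as the field docs describe.
func parseKubeletVersion(v string) ([3]int, error) {
	v = strings.SplitN(v, "-", 2)[0]
	v = strings.SplitN(v, "+", 2)[0]
	parts := strings.Split(v, ".")
	var out [3]int
	if len(parts) != 3 {
		return out, fmt.Errorf("not x.y.z: %q", v)
	}
	for i, p := range parts {
		n, err := strconv.Atoi(p)
		if err != nil {
			return out, err
		}
		out[i] = n
	}
	return out, nil
}

// atLeast reports whether the kubelet version meets the configured minimum.
func atLeast(kubelet, minimum string) (bool, error) {
	k, err := parseKubeletVersion(kubelet)
	if err != nil {
		return false, err
	}
	m, err := parseKubeletVersion(minimum)
	if err != nil {
		return false, err
	}
	for i := 0; i < 3; i++ {
		if k[i] != m[i] {
			return k[i] > m[i], nil
		}
	}
	return true, nil
}

func main() {
	ok, _ := atLeast("1.30.0-ec.0", "1.30.0")
	fmt.Println(ok) // true: the pre-release suffix is ignored
}
```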
In other words: if an admin wishes to ensure no nodes run an older version than OpenShift 4.17, then they should set the minimumKubeletVersion to 1.30.0. When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. Thus, a kubelet with version \"1.0.0-ec.0\" will be compatible with minimumKubeletVersion \"1.0.0\" or earlier.", } func (KubeAPIServerConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go index 574c90035a68..01644fcf7784 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go @@ -196,7 +196,8 @@ type ControllerConfigStatus struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` // conditions represents the latest available observations of current state. - // +listType=atomic + // +listType=map + // +listMapKey=type // +optional Conditions []ControllerConfigStatusCondition `json:"conditions"` diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml index c2cac2c544d3..0766b10691c1 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml @@ -28,6 +28,7 @@ controllerconfigs.machineconfiguration.openshift.io: Capability: "" Category: "" FeatureGates: + - AWSClusterHostedDNS - BareMetalLoadBalancer - GCPClusterHostedDNS - GCPLabelsTags diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go index eeb8afdc6d22..f04b6846aee5 100644 --- a/vendor/github.com/openshift/api/operator/v1/types.go +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -147,17 +147,27 @@ type GenerationStatus struct { // group is the group of the thing you're tracking // +kubebuilder:validation:Required Group string `json:"group"` + // resource is the resource type of the thing you're tracking // +kubebuilder:validation:Required Resource string `json:"resource"` + // namespace is where the thing you're tracking is // +kubebuilder:validation:Required Namespace string `json:"namespace"` + // name is the name of the thing you're tracking // +kubebuilder:validation:Required Name string `json:"name"` + + // TODO: Add validation for lastGeneration. The value for this field should generally increase, except when the associated + // resource has been deleted and re-created. To accurately validate this field, we should introduce a new UID field and only + // enforce an increasing value in lastGeneration when the UID remains unchanged. A change in the UID indicates that the resource + // was re-created, allowing the lastGeneration value to reset or decrease. + // lastGeneration is the last generation of the workload controller involved LastGeneration int64 `json:"lastGeneration"` + + // hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps Hash string `json:"hash"` } @@ -178,12 +188,34 @@ var ( // OperatorCondition is just the standard condition fields. type OperatorCondition struct { + // type of condition in CamelCase or in foo.example.com/CamelCase. 
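A hedged check of the condition Type pattern introduced in this hunk (the regexp literal is copied from the +kubebuilder marker below; the harness is illustrative, and the separate MaxLength=316 bound is not shown):

```go
package main

import (
	"fmt"
	"regexp"
)

// conditionType is the Pattern from the +kubebuilder marker in this hunk:
// an optional DNS-1123 subdomain prefix ending in "/", then a qualified name.
var conditionType = regexp.MustCompile(`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`)

func main() {
	for _, t := range []string{"Available", "foo.example.com/CamelCase", "-bad"} {
		fmt.Printf("%-26s %v\n", t, conditionType.MatchString(t))
	}
}
```

The first two inputs match; the leading hyphen in the third is rejected, matching the "deconflictable" convention the comment describes.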
+ // --- + // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + // useful (see .node.status.conditions), the ability to deconflict is important. + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +required // +kubebuilder:validation:Required - Type string `json:"type"` - Status ConditionStatus `json:"status"` - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - Reason string `json:"reason,omitempty"` - Message string `json:"message,omitempty"` + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + + // status of the condition, one of True, False, Unknown. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=True;False;Unknown + Status ConditionStatus `json:"status"` + + // lastTransitionTime is the last time the condition transitioned from one status to another. + // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + + Reason string `json:"reason,omitempty"` + + Message string `json:"message,omitempty"` } type ConditionStatus string diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 7ae22ee0a7ce..1f5664345649 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -392,6 +392,7 @@ type CIDR string // LoadBalancerStrategy holds parameters for a load balancer. // +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule="!has(self.scope) || self.scope != 'Internal' || !has(self.providerParameters) || !has(self.providerParameters.aws) || !has(self.providerParameters.aws.networkLoadBalancer) || !has(self.providerParameters.aws.networkLoadBalancer.eipAllocations)",message="eipAllocations are forbidden when the scope is Internal." +// +kubebuilder:validation:XValidation:rule=`!has(self.scope) || self.scope != 'Internal' || !has(self.providerParameters) || !has(self.providerParameters.openstack) || !has(self.providerParameters.openstack.floatingIP) || self.providerParameters.openstack.floatingIP == ""`,message="cannot specify a floating ip when scope is internal" type LoadBalancerStrategy struct { // scope indicates the scope at which the load balancer is exposed. // Possible values are "External" and "Internal". @@ -678,19 +679,28 @@ type IBMLoadBalancerParameters struct { // OpenStackLoadBalancerParameters provides configuration settings that are // specific to OpenStack load balancers. type OpenStackLoadBalancerParameters struct { - // loadBalancerIP specifies the floating IP address that the load balancer will use. + // loadBalancerIP is tombstoned since the field was replaced by floatingIP. + // LoadBalancerIP string `json:"loadBalancerIP,omitempty"` + + // floatingIP specifies the IP address that the load balancer will use. // When not specified, an IP address will be assigned randomly by the OpenStack cloud provider. 
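The CEL rule at the top of this hunk forbids a floating IP on Internal-scope load balancers, and the XValidation marker requires a parseable address. A rough Go restatement (a hypothetical helper, not the controller's code):

```go
package main

import (
	"fmt"
	"net"
)

// floatingIPAllowed restates the two validations in this hunk: a floating IP
// may only be set for External scope, and it must parse as an IPv4/IPv6 address.
func floatingIPAllowed(scope, floatingIP string) error {
	if floatingIP == "" {
		return nil
	}
	if scope == "Internal" {
		return fmt.Errorf("cannot specify a floating ip when scope is internal")
	}
	if net.ParseIP(floatingIP) == nil {
		return fmt.Errorf("floatingIP must be a valid IPv4 or IPv6 address")
	}
	return nil
}

func main() {
	fmt.Println(floatingIPAllowed("External", "203.0.113.10")) // <nil>
	fmt.Println(floatingIPAllowed("Internal", "203.0.113.10")) // error
}
```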
+ // When specified, the floating IP has to be pre-created. If the + // specified value is not a floating IP or is already claimed, the + // OpenStack cloud provider won't be able to provision the load + // balancer. + // This field may only be used if the IngressController has External scope. // This value must be a valid IPv4 or IPv6 address. // + --- - // + Note: this field is meant to be set by the ingress controller to populate the - // + `Service.Spec.LoadBalancerIP` field which has been deprecated in Kubernetes: + // + Note: this field is meant to be set by the ingress controller + // + to populate the `Service.Spec.LoadBalancerIP` field which has been + // + deprecated in Kubernetes: // + https://github.com/kubernetes/kubernetes/pull/107235 // + However, the field is still used by cloud-provider-openstack to reconcile - // + the floating IP that we attach to the load balancer. + // + the floating IP that we attach to the external load balancer. // - // +kubebuilder:validation:XValidation:rule="isIP(self)",message="loadBalancerIP must be a valid IPv4 or IPv6 address" + // +kubebuilder:validation:XValidation:rule="isIP(self)",message="floatingIP must be a valid IPv4 or IPv6 address" // +optional - LoadBalancerIP string `json:"loadBalancerIP,omitempty"` + FloatingIP string `json:"floatingIP,omitempty"` } // AWSClassicLoadBalancerParameters holds configuration parameters for an diff --git a/vendor/github.com/openshift/api/operator/v1/types_olm.go b/vendor/github.com/openshift/api/operator/v1/types_olm.go new file mode 100644 index 000000000000..84d7e9ea114d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_olm.go @@ -0,0 +1,58 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLM provides information to configure an operator to manage the OLM controllers +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=olms,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1504 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=operator-lifecycle-manager,operatorOrdering=01 +// +openshift:enable:FeatureGate=NewOLM +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="olm is a singleton, .metadata.name must be 'cluster'" +type OLM struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + Spec OLMSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status OLMStatus `json:"status"` +} + +type OLMSpec struct { + OperatorSpec `json:",inline"` +} + +type OLMStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLMList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
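The XValidation rule on OLM above makes it a cluster-scoped singleton; a hedged sketch of what that constraint accepts (simplified stand-in structs, not importing the vendored package):

```go
package main

import "fmt"

// olm is a simplified stand-in for the OLM type in this hunk.
type olm struct {
	Name string // corresponds to metadata.name
}

// validateSingleton mirrors the CEL rule: self.metadata.name == 'cluster'.
func validateSingleton(o olm) error {
	if o.Name != "cluster" {
		return fmt.Errorf("olm is a singleton, .metadata.name must be 'cluster'")
	}
	return nil
}

func main() {
	fmt.Println(validateSingleton(olm{Name: "cluster"})) // <nil>
	fmt.Println(validateSingleton(olm{Name: "other"}))   // error
}
```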
+// +openshift:compatibility-gen:level=1 +type OLMList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []OLM `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 84edc0cab387..3b984f2a618b 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -3834,6 +3834,101 @@ func (in *OAuthAPIServerStatus) DeepCopy() *OAuthAPIServerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLM) DeepCopyInto(out *OLM) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLM. +func (in *OLM) DeepCopy() *OLM { + if in == nil { + return nil + } + out := new(OLM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLM) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMList) DeepCopyInto(out *OLMList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OLM, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMList. +func (in *OLMList) DeepCopy() *OLMList { + if in == nil { + return nil + } + out := new(OLMList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLMList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMSpec) DeepCopyInto(out *OLMSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMSpec. +func (in *OLMSpec) DeepCopy() *OLMSpec { + if in == nil { + return nil + } + out := new(OLMSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMStatus) DeepCopyInto(out *OLMStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMStatus. +func (in *OLMStatus) DeepCopy() *OLMStatus { + if in == nil { + return nil + } + out := new(OLMStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 9ed8975177ca..ae9a88c228bb 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -346,6 +346,29 @@ networks.operator.openshift.io: TopLevelFeatureGates: [] Version: v1 +olms.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/1504 + CRDName: olms.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - NewOLM + FilenameOperatorName: operator-lifecycle-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: operator.openshift.io + HasStatus: true + KindName: OLM + Labels: {} + PluralName: olms + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - NewOLM + Version: v1 + openshiftapiservers.operator.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/475 diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index a3a78a89b7c7..3c00fe2f032a 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -52,7 +52,10 @@ func (NodeStatus) SwaggerDoc() map[string]string { } var map_OperatorCondition = map[string]string{ - "": "OperatorCondition is just the standard condition fields.", + "": "OperatorCondition is just the standard condition fields.", + "type": "type of condition in CamelCase or in foo.example.com/CamelCase.", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", } func (OperatorCondition) SwaggerDoc() map[string]string { @@ -1122,8 +1125,8 @@ func (NodePortStrategy) SwaggerDoc() map[string]string { } var map_OpenStackLoadBalancerParameters = map[string]string{ - "": "OpenStackLoadBalancerParameters provides configuration settings that are specific to OpenStack load balancers.", - "loadBalancerIP": "loadBalancerIP specifies the floating IP address that the load balancer will use. When not specified, an IP address will be assigned randomly by the OpenStack cloud provider. This value must be a valid IPv4 or IPv6 address. ", + "": "OpenStackLoadBalancerParameters provides configuration settings that are specific to OpenStack load balancers.", + "floatingIP": "floatingIP specifies the IP address that the load balancer will use. When not specified, an IP address will be assigned randomly by the OpenStack cloud provider. When specified, the floating IP has to be pre-created. If the specified value is not a floating IP or is already claimed, the OpenStack cloud provider won't be able to provision the load balancer. This field may only be used if the IngressController has External scope. This value must be a valid IPv4 or IPv6 address. 
", } func (OpenStackLoadBalancerParameters) SwaggerDoc() map[string]string { @@ -1893,6 +1896,27 @@ func (StaticIPAMRoutes) SwaggerDoc() map[string]string { return map_StaticIPAMRoutes } +var map_OLM = map[string]string{ + "": "OLM provides information to configure an operator to manage the OLM controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (OLM) SwaggerDoc() map[string]string { + return map_OLM +} + +var map_OLMList = map[string]string{ + "": "OLMList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", +} + +func (OLMList) SwaggerDoc() map[string]string { + return map_OLMList +} + var map_OpenShiftAPIServer = map[string]string{ "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go index e5b1b74eac6f..b217e5bdcd44 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go @@ -5,9 +5,10 @@ package v1 // AWSPlatformStatusApplyConfiguration represents a declarative configuration of the AWSPlatformStatus type for use // with apply. type AWSPlatformStatusApplyConfiguration struct { - Region *string `json:"region,omitempty"` - ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` - ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + Region *string `json:"region,omitempty"` + ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` + ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"` } // AWSPlatformStatusApplyConfiguration constructs a declarative configuration of the AWSPlatformStatus type for use with @@ -49,3 +50,11 @@ func (b *AWSPlatformStatusApplyConfiguration) WithResourceTags(values ...*AWSRes } return b } + +// WithCloudLoadBalancerConfig sets the CloudLoadBalancerConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CloudLoadBalancerConfig field is set to the value of the last call. 
+func (b *AWSPlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(value *CloudLoadBalancerConfigApplyConfiguration) *AWSPlatformStatusApplyConfiguration { + b.CloudLoadBalancerConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go index 37965a1384c6..f1c513f4cb4c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go @@ -9,8 +9,9 @@ import ( // NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use // with apply. type NodeSpecApplyConfiguration struct { - CgroupMode *v1.CgroupMode `json:"cgroupMode,omitempty"` - WorkerLatencyProfile *v1.WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` + CgroupMode *v1.CgroupMode `json:"cgroupMode,omitempty"` + WorkerLatencyProfile *v1.WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` + MinimumKubeletVersion *string `json:"minimumKubeletVersion,omitempty"` } // NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with @@ -34,3 +35,11 @@ func (b *NodeSpecApplyConfiguration) WithWorkerLatencyProfile(value v1.WorkerLat b.WorkerLatencyProfile = &value return b } + +// WithMinimumKubeletVersion sets the MinimumKubeletVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinimumKubeletVersion field is set to the value of the last call. +func (b *NodeSpecApplyConfiguration) WithMinimumKubeletVersion(value string) *NodeSpecApplyConfiguration { + b.MinimumKubeletVersion = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index 003e4d2eff02..55099fc9881f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -139,6 +139,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.config.v1.AWSPlatformStatus map: fields: + - name: cloudLoadBalancerConfig + type: + namedType: com.github.openshift.api.config.v1.CloudLoadBalancerConfig + default: + dnsType: PlatformDefault - name: region type: scalar: string @@ -2167,6 +2172,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: cgroupMode type: scalar: string + - name: minimumKubeletVersion + type: + scalar: string + default: "" - name: workerLatencyProfile type: scalar: string diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go index 60be55fbb032..2a11bffc77e3 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go @@ -2897,6 +2897,81 @@ var schemaYAML = typed.YAMLObject(`types: - name: latestAvailableRevision type: scalar: numeric +- name: com.github.openshift.api.operator.v1.OLM + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: 
string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.OLMSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.operator.v1.OLMStatus + default: {} +- name: com.github.openshift.api.operator.v1.OLMSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.OLMStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string - name: com.github.openshift.api.operator.v1.OVNKubernetesConfig map: fields: @@ -3111,7 +3186,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.OpenStackLoadBalancerParameters map: fields: - - name: loadBalancerIP + - name: floatingIP type: scalar: string - name: com.github.openshift.api.operator.v1.OperatorCondition diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go new file mode 100644 index 000000000000..d1fb73953869 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apioperatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OLMApplyConfiguration represents a declarative configuration of the OLM type for use +// with apply. +type OLMApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OLMSpecApplyConfiguration `json:"spec,omitempty"` + Status *OLMStatusApplyConfiguration `json:"status,omitempty"` +} + +// OLM constructs a declarative configuration of the OLM type for use with +// apply. +func OLM(name string) *OLMApplyConfiguration { + b := &OLMApplyConfiguration{} + b.WithName(name) + b.WithKind("OLM") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractOLM extracts the applied configuration owned by fieldManager from +// oLM. If no managedFields are found in oLM for fieldManager, a +// OLMApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found for fieldManager because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned any fields. +// oLM must be an unmodified OLM API object that was retrieved from the Kubernetes API. +// ExtractOLM provides a way to perform an extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractOLM(oLM *apioperatorv1.OLM, fieldManager string) (*OLMApplyConfiguration, error) { + return extractOLM(oLM, fieldManager, "") +} + +// ExtractOLMStatus is the same as ExtractOLM except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractOLMStatus(oLM *apioperatorv1.OLM, fieldManager string) (*OLMApplyConfiguration, error) { + return extractOLM(oLM, fieldManager, "status") +} + +func extractOLM(oLM *apioperatorv1.OLM, fieldManager string, subresource string) (*OLMApplyConfiguration, error) { + b := &OLMApplyConfiguration{} + err := managedfields.ExtractInto(oLM, internal.Parser().Type("com.github.openshift.api.operator.v1.OLM"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oLM.Name) + + b.WithKind("OLM") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithKind(value string) *OLMApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithAPIVersion(value string) *OLMApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithName(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithGenerateName(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *OLMApplyConfiguration) WithNamespace(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithUID(value types.UID) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithResourceVersion(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithGeneration(value int64) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting existing map entries in the Labels field with the same key.
+func (b *OLMApplyConfiguration) WithLabels(entries map[string]string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting existing map entries in the Annotations field with the same key. +func (b *OLMApplyConfiguration) WithAnnotations(entries map[string]string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OLMApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *OLMApplyConfiguration) WithFinalizers(values ...string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *OLMApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithSpec(value *OLMSpecApplyConfiguration) *OLMApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithStatus(value *OLMStatusApplyConfiguration) *OLMApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *OLMApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go new file mode 100644 index 000000000000..6639c0c46cf8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OLMSpecApplyConfiguration represents a declarative configuration of the OLMSpec type for use +// with apply. +type OLMSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// OLMSpecApplyConfiguration constructs a declarative configuration of the OLMSpec type for use with +// apply. +func OLMSpec() *OLMSpecApplyConfiguration { + return &OLMSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OLMSpecApplyConfiguration { + b.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OLMSpecApplyConfiguration { + b.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OLMSpecApplyConfiguration { + b.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *OLMSpecApplyConfiguration { + b.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. 
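Putting the generated pieces together: a hypothetical sketch of a controller declaring the singleton cluster-scoped OLM object with server-side apply, using the builders above and the OLMs() clientset method added later in this diff. The field manager name is illustrative:

	package example

	import (
		"context"

		opv1 "github.com/openshift/api/operator/v1"
		applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
		operatorclient "github.com/openshift/client-go/operator/clientset/versioned"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	// applyOLM declares the desired spec for the "cluster" OLM object;
	// server-side apply reconciles only the fields set here.
	func applyOLM(ctx context.Context, client operatorclient.Interface) error {
		cfg := applyoperatorv1.OLM("cluster").
			WithSpec(applyoperatorv1.OLMSpec().
				WithManagementState(opv1.Managed))
		_, err := client.OperatorV1().OLMs().Apply(ctx, cfg, metav1.ApplyOptions{
			FieldManager: "example-olm-controller", // illustrative name
			Force:        true,
		})
		return err
	}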
+func (b *OLMSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *OLMSpecApplyConfiguration { + b.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go new file mode 100644 index 000000000000..be8e647f3ae3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OLMStatusApplyConfiguration represents a declarative configuration of the OLMStatus type for use +// with apply. +type OLMStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// OLMStatusApplyConfiguration constructs a declarative configuration of the OLMStatus type for use with +// apply. +func OLMStatus() *OLMStatusApplyConfiguration { + return &OLMStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithObservedGeneration(value int64) *OLMStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *OLMStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *OLMStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithVersion(value string) *OLMStatusApplyConfiguration { + b.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithReadyReplicas(value int32) *OLMStatusApplyConfiguration { + b.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call.
+func (b *OLMStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OLMStatusApplyConfiguration { + b.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *OLMStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *OLMStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.Generations = append(b.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go index 07d523555b69..811b2330b718 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go @@ -5,7 +5,7 @@ package v1 // OpenStackLoadBalancerParametersApplyConfiguration represents a declarative configuration of the OpenStackLoadBalancerParameters type for use // with apply. type OpenStackLoadBalancerParametersApplyConfiguration struct { - LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` + FloatingIP *string `json:"floatingIP,omitempty"` } // OpenStackLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the OpenStackLoadBalancerParameters type for use with @@ -14,10 +14,10 @@ func OpenStackLoadBalancerParameters() *OpenStackLoadBalancerParametersApplyConf { return &OpenStackLoadBalancerParametersApplyConfiguration{} } -// WithLoadBalancerIP sets the LoadBalancerIP field in the declarative configuration to the given value +// WithFloatingIP sets the FloatingIP field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LoadBalancerIP field is set to the value of the last call. -func (b *OpenStackLoadBalancerParametersApplyConfiguration) WithLoadBalancerIP(value string) *OpenStackLoadBalancerParametersApplyConfiguration { - b.LoadBalancerIP = &value +// If called multiple times, the FloatingIP field is set to the value of the last call.
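The rename carries through to the typed builder below. A small sketch of requesting a pre-created floating IP for an OpenStack ingress load balancer (the address is illustrative; the isIP(self) CEL rule from the API change at the top of this diff rejects non-IP values at admission):

	package example

	import (
		operatorv1apply "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	)

	// openstackParams asks the OpenStack cloud provider to reuse an
	// existing, unclaimed floating IP for the ingress load balancer.
	func openstackParams() *operatorv1apply.OpenStackLoadBalancerParametersApplyConfiguration {
		return operatorv1apply.OpenStackLoadBalancerParameters().
			WithFloatingIP("192.0.2.10")
	}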
+func (b *OpenStackLoadBalancerParametersApplyConfiguration) WithFloatingIP(value string) *OpenStackLoadBalancerParametersApplyConfiguration { + b.FloatingIP = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go index dd8a5efd399d..67d774a2a73e 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go @@ -34,6 +34,8 @@ type MachineConfigurationExpansion interface{} type NetworkExpansion interface{} +type OLMExpansion interface{} + type OpenShiftAPIServerExpansion interface{} type OpenShiftControllerManagerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/olm.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/olm.go new file mode 100644 index 000000000000..6adb68c89a4b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/olm.go @@ -0,0 +1,57 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// OLMsGetter has a method to return a OLMInterface. +// A group's client should implement this interface. +type OLMsGetter interface { + OLMs() OLMInterface +} + +// OLMInterface has methods to work with OLM resources. +type OLMInterface interface { + Create(ctx context.Context, oLM *v1.OLM, opts metav1.CreateOptions) (*v1.OLM, error) + Update(ctx context.Context, oLM *v1.OLM, opts metav1.UpdateOptions) (*v1.OLM, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, oLM *v1.OLM, opts metav1.UpdateOptions) (*v1.OLM, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.OLM, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.OLMList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OLM, err error) + Apply(ctx context.Context, oLM *operatorv1.OLMApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OLM, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, oLM *operatorv1.OLMApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OLM, err error) + OLMExpansion +} + +// oLMs implements OLMInterface +type oLMs struct { + *gentype.ClientWithListAndApply[*v1.OLM, *v1.OLMList, *operatorv1.OLMApplyConfiguration] +} + +// newOLMs returns an oLMs +func newOLMs(c *OperatorV1Client) *oLMs { + return &oLMs{ + gentype.NewClientWithListAndApply[*v1.OLM, *v1.OLMList, *operatorv1.OLMApplyConfiguration]( + "olms", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.OLM { return &v1.OLM{} }, + func() *v1.OLMList { return &v1.OLMList{} }), + } +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go index 996fad76d7ec..9ba66e07ce8f 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go @@ -28,6 +28,7 @@ type OperatorV1Interface interface { KubeStorageVersionMigratorsGetter MachineConfigurationsGetter NetworksGetter + OLMsGetter OpenShiftAPIServersGetter OpenShiftControllerManagersGetter ServiceCAsGetter @@ -105,6 +106,10 @@ func (c *OperatorV1Client) Networks() NetworkInterface { return newNetworks(c) } + +func (c *OperatorV1Client) OLMs() OLMInterface { + return newOLMs(c) +} + func (c *OperatorV1Client) OpenShiftAPIServers() OpenShiftAPIServerInterface { return newOpenShiftAPIServers(c) } diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/jsonpatch/jsonpatch.go b/vendor/github.com/openshift/library-go/pkg/apiserver/jsonpatch/jsonpatch.go index 50a67cd7820a..a718832b1e15 100644 --- a/vendor/github.com/openshift/library-go/pkg/apiserver/jsonpatch/jsonpatch.go +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/jsonpatch/jsonpatch.go @@ -2,6 +2,9 @@ package jsonpatch import ( "encoding/json" + "fmt" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" ) type PatchOperation struct { @@ -34,7 +37,14 @@ func (p *PatchSet) WithTest(path string, value interface{}) *PatchSet { return p } +func (p *PatchSet) IsEmpty() bool { + return len(p.patches) == 0 +} + func (p *PatchSet) Marshal() ([]byte, error) { + if err := p.validate(); err != nil { + return nil, err + } jsonBytes, err := json.Marshal(p.patches) if err != nil { return nil, err @@ -51,6 +61,22 @@ func (p *PatchSet) addOperation(op, path string, value interface{}) { p.patches = append(p.patches, patch) } +func (p *PatchSet) validate() error { + var errs []error + for i, patch := range p.patches { + if patch.Op == patchTestOperation { + // testing resourceVersion is fragile + // because it is likely to change frequently + // instead, a test against a different field + // should be written.
+ if patch.Path == "/metadata/resourceVersion" { + errs = append(errs, fmt.Errorf("test operation at index: %d contains forbidden path: %q", i, patch.Path)) + } + } + } + return utilerrors.NewAggregate(errs) +} + type TestCondition struct { path string value interface{} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go index 63184d2eb41a..e6651fecc2c6 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -150,9 +150,9 @@ var ciphers = map[string]uint16{ // ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml var openSSLToIANACiphersMap = map[string]string{ // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows - // "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 - // "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02 - // "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03 + "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 + "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02 + "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03 // TLS 1.2 "ECDHE-ECDSA-AES128-GCM-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2B diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go index f954d48cc65b..377e278065cd 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go @@ -463,12 +463,21 @@ func ApplySecretImproved(ctx context.Context, client coreclientv1.SecretsGetter, // SyncConfigMap applies a ConfigMap from a location `sourceNamespace/sourceName` to `targetNamespace/targetName` func SyncConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) { - return SyncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs) + return syncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs, nil) +} + +// SyncConfigMapWithLabels does what SyncConfigMap does, but adds additional labels to the target ConfigMap. +func SyncConfigMapWithLabels(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference, labels map[string]string) (*corev1.ConfigMap, bool, error) { + return syncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs, labels) } // SyncPartialConfigMap does what SyncConfigMap does but it only synchronizes a subset of keys given by `syncedKeys`. // SyncPartialConfigMap will delete the target if `syncedKeys` are set but the source does not contain any of these keys. 
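A hedged sketch of the new label-adding variant (namespaces, names, and the label key are illustrative; SyncSecretWithLabels below has the same shape for Secrets):

	package example

	import (
		"context"

		"github.com/openshift/library-go/pkg/operator/events"
		"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
		"k8s.io/client-go/kubernetes"
	)

	// syncTrustedCA copies a ConfigMap between namespaces and stamps the
	// copy with an extra label, e.g. so an informer can select it.
	func syncTrustedCA(ctx context.Context, kubeClient kubernetes.Interface, recorder events.Recorder) error {
		_, _, err := resourceapply.SyncConfigMapWithLabels(
			ctx, kubeClient.CoreV1(), recorder,
			"openshift-config", "trusted-ca-bundle", // source (illustrative)
			"openshift-example", "trusted-ca-bundle", // target (illustrative)
			nil, // no owner references
			map[string]string{"example.openshift.io/watched": "true"},
		)
		return err
	}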
func SyncPartialConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.Set[string], ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) { + return syncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, syncedKeys, ownerRefs, nil) +} + +func syncPartialConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.Set[string], ownerRefs []metav1.OwnerReference, labels map[string]string) (*corev1.ConfigMap, bool, error) { source, err := client.ConfigMaps(sourceNamespace).Get(ctx, sourceName, metav1.GetOptions{}) switch { case apierrors.IsNotFound(err): @@ -500,6 +509,12 @@ func SyncPartialConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGet source.Name = targetName source.ResourceVersion = "" source.OwnerReferences = ownerRefs + if labels != nil && source.Labels == nil { + source.Labels = map[string]string{} + } + for k, v := range labels { + source.Labels[k] = v + } return ApplyConfigMap(ctx, client, recorder, source) } } @@ -524,12 +539,21 @@ func deleteConfigMapSyncTarget(ctx context.Context, client coreclientv1.ConfigMa // SyncSecret applies a Secret from a location `sourceNamespace/sourceName` to `targetNamespace/targetName` func SyncSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) { - return SyncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs) + return syncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs, nil) +} + +// SyncSecretWithLabels does what SyncSecret does, but adds additional labels to the target Secret. +func SyncSecretWithLabels(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference, labels map[string]string) (*corev1.Secret, bool, error) { + return syncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs, labels) } // SyncPartialSecret does what SyncSecret does but it only synchronizes a subset of keys given by `syncedKeys`. // SyncPartialSecret will delete the target if `syncedKeys` are set but the source does not contain any of these keys. 
func SyncPartialSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.Set[string], ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) { + return syncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, syncedKeys, ownerRefs, nil) +} + +func syncPartialSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.Set[string], ownerRefs []metav1.OwnerReference, labels map[string]string) (*corev1.Secret, bool, error) { source, err := client.Secrets(sourceNamespace).Get(ctx, sourceName, metav1.GetOptions{}) switch { case apierrors.IsNotFound(err): @@ -579,6 +603,12 @@ func SyncPartialSecret(ctx context.Context, client coreclientv1.SecretsGetter, r source.Name = targetName source.ResourceVersion = "" source.OwnerReferences = ownerRefs + if labels != nil && source.Labels == nil { + source.Labels = map[string]string{} + } + for k, v := range labels { + source.Labels[k] = v + } return ApplySecret(ctx, client, recorder, source) } } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go index 99f3ecb73bd6..d0996a2afb88 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go @@ -2,23 +2,17 @@ package resourceapply import ( "context" - errorsstdlib "errors" - "fmt" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcehelper" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/klog/v2" - "k8s.io/utils/ptr" - - "github.com/openshift/library-go/pkg/operator/events" - "github.com/openshift/library-go/pkg/operator/resource/resourcehelper" - - "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" ) var alertmanagerGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "alertmanagers"} @@ -88,10 +82,10 @@ func ApplyUnstructuredResourceImproved( } existing, err := client.Resource(resourceGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) if errors.IsNotFound(err) { - want, err := client.Resource(resourceGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{}) - resourcehelper.ReportCreateEvent(recorder, required, err) + want, errCreate := client.Resource(resourceGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{}) + resourcehelper.ReportCreateEvent(recorder, required, errCreate) cache.UpdateCachedResourceMetadata(required, want) - return want, true, err + return want, true, errCreate } if err != nil { return nil, false, err @@ -102,44 +96,15 @@ func ApplyUnstructuredResourceImproved( return existing, false, nil } - // Ensure metadata field is present on the object. 
existingCopy := existing.DeepCopy() - existingObjectMeta, found, err := unstructured.NestedMap(existingCopy.Object, "metadata") - if err != nil { - return nil, false, err - } - if !found { - return nil, false, errorsstdlib.New(fmt.Sprintf("metadata not found in the existing object: %s/%s", existing.GetNamespace(), existingCopy.GetName())) - } - requiredObjectMeta, found, err := unstructured.NestedMap(required.Object, "metadata") - if err != nil { - return nil, false, err - } - if !found { - return nil, false, errorsstdlib.New(fmt.Sprintf("metadata not found in the required object: %s/%s", required.GetNamespace(), required.GetName())) - } - // Cast the metadata to the correct type. - var existingObjectMetaTyped, requiredObjectMetaTyped metav1.ObjectMeta - err = runtime.DefaultUnstructuredConverter.FromUnstructured(existingObjectMeta, &existingObjectMetaTyped) - if err != nil { - return nil, false, err - } - err = runtime.DefaultUnstructuredConverter.FromUnstructured(requiredObjectMeta, &requiredObjectMetaTyped) + // Replace and/or merge certain metadata fields. + didMetadataModify := false + err = resourcemerge.EnsureObjectMetaForUnstructured(&didMetadataModify, existingCopy, required) if err != nil { return nil, false, err } - // Fail-fast if the resource versions differ. - if requiredObjectMetaTyped.ResourceVersion != "" && existingObjectMetaTyped.ResourceVersion != requiredObjectMetaTyped.ResourceVersion { - err = errors.NewConflict(resourceGVR.GroupResource(), name, fmt.Errorf("rejected to update %s %s because the object has been modified: desired/actual ResourceVersion: %v/%v", existing.GetKind(), existing.GetName(), requiredObjectMetaTyped.ResourceVersion, existingObjectMetaTyped.ResourceVersion)) - return nil, false, err - } - - // Check if the metadata objects differ. - didMetadataModify := ptr.To(false) - resourcemerge.EnsureObjectMeta(didMetadataModify, &existingObjectMetaTyped, requiredObjectMetaTyped) - // Deep-check the spec objects for equality, and update the cache in either case. if defaultingFunc == nil { defaultingFunc = noDefaulting @@ -147,26 +112,26 @@ func ApplyUnstructuredResourceImproved( if equalityChecker == nil { equalityChecker = equality.Semantic } - existingCopy, didSpecModify, err := ensureGenericSpec(required, existingCopy, defaultingFunc, equalityChecker) + didSpecModify := false + err = ensureGenericSpec(&didSpecModify, required, existingCopy, defaultingFunc, equalityChecker) if err != nil { return nil, false, err } - if !didSpecModify && !*didMetadataModify { + if !didSpecModify && !didMetadataModify { // Update cache even if certain fields are not modified, in order to maintain a consistent cache based on the // resource hash. The resource hash depends on the entire metadata, not just the fields that were checked above, cache.UpdateCachedResourceMetadata(required, existingCopy) return existingCopy, false, nil } + // Perform update if resource exists but different from the required (desired) one. if klog.V(4).Enabled() { klog.Infof("%s %q changes: %v", resourceGVR.String(), namespace+"/"+name, JSONPatchNoError(existing, existingCopy)) } - - // Perform update if resource exists but different from the required (desired) one. 
- actual, err := client.Resource(resourceGVR).Namespace(namespace).Update(ctx, required, metav1.UpdateOptions{}) - resourcehelper.ReportUpdateEvent(recorder, required, err) - cache.UpdateCachedResourceMetadata(required, actual) - return actual, true, err + actual, errUpdate := client.Resource(resourceGVR).Namespace(namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) + resourcehelper.ReportUpdateEvent(recorder, existingCopy, errUpdate) + cache.UpdateCachedResourceMetadata(existingCopy, actual) + return actual, true, errUpdate } // DeleteUnstructuredResource deletes the unstructured resource. @@ -182,27 +147,27 @@ func DeleteUnstructuredResource(ctx context.Context, client dynamic.Interface, r return nil, true, nil } -func ensureGenericSpec(required, existing *unstructured.Unstructured, mimicDefaultingFn mimicDefaultingFunc, equalityChecker equalityChecker) (*unstructured.Unstructured, bool, error) { +func ensureGenericSpec(didSpecModify *bool, required, existing *unstructured.Unstructured, mimicDefaultingFn mimicDefaultingFunc, equalityChecker equalityChecker) error { mimicDefaultingFn(required) requiredSpec, _, err := unstructured.NestedMap(required.UnstructuredContent(), "spec") if err != nil { - return nil, false, err + return err } existingSpec, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "spec") if err != nil { - return nil, false, err + return err } if equalityChecker.DeepEqual(existingSpec, requiredSpec) { - return existing, false, nil + return nil } - existingCopy := existing.DeepCopy() - if err := unstructured.SetNestedMap(existingCopy.UnstructuredContent(), requiredSpec, "spec"); err != nil { - return nil, true, err + if err = unstructured.SetNestedMap(existing.UnstructuredContent(), requiredSpec, "spec"); err != nil { + return err } + *didSpecModify = true - return existingCopy, true, nil + return nil } // mimicDefaultingFunc is used to set fields that are defaulted. This allows for sparse manifests to apply correctly. diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go index 4c5dcacaa7ba..20e19a78fea4 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go @@ -1,10 +1,14 @@ package resourcemerge import ( + errorsstdlib "errors" + "fmt" "reflect" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -18,6 +22,48 @@ func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required meta MergeOwnerRefs(modified, &existing.OwnerReferences, required.OwnerReferences) } +func EnsureObjectMetaForUnstructured(modified *bool, existing *unstructured.Unstructured, required *unstructured.Unstructured) error { + + // Ensure metadata field is present on the object. 
+ existingObjectMeta, found, err := unstructured.NestedMap(existing.Object, "metadata") + if err != nil { + return err + } + if !found { + return errorsstdlib.New(fmt.Sprintf("metadata not found in the existing object: %s/%s", existing.GetNamespace(), existing.GetName())) + } + var requiredObjectMeta map[string]interface{} + requiredObjectMeta, found, err = unstructured.NestedMap(required.Object, "metadata") + if err != nil { + return err + } + if !found { + return errorsstdlib.New(fmt.Sprintf("metadata not found in the required object: %s/%s", required.GetNamespace(), required.GetName())) + } + + // Cast the metadata to the correct type. + var existingObjectMetaTyped, requiredObjectMetaTyped metav1.ObjectMeta + err = runtime.DefaultUnstructuredConverter.FromUnstructured(existingObjectMeta, &existingObjectMetaTyped) + if err != nil { + return err + } + err = runtime.DefaultUnstructuredConverter.FromUnstructured(requiredObjectMeta, &requiredObjectMetaTyped) + if err != nil { + return err + } + + // Check if the metadata objects differ. This only checks for selective fields (excluding the resource version, among others). + EnsureObjectMeta(modified, &existingObjectMetaTyped, requiredObjectMetaTyped) + if *modified { + existing.Object["metadata"], err = runtime.DefaultUnstructuredConverter.ToUnstructured(&existingObjectMetaTyped) + if err != nil { + return err + } + } + + return nil +} + // WithCleanLabelsAndAnnotations cleans the metadata off the removal annotations/labels/ownerrefs // (those that end with trailing "-") func WithCleanLabelsAndAnnotations(obj metav1.Object) metav1.Object { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go index 572b915bc614..11326c89d77e 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go @@ -53,3 +53,21 @@ func ReadValidatingAdmissionPolicyBindingV1beta1OrDie(objBytes []byte) *admissio return requiredObj.(*admissionv1beta1.ValidatingAdmissionPolicyBinding) } + +func ReadValidatingAdmissionPolicyV1OrDie(objBytes []byte) *admissionv1.ValidatingAdmissionPolicy { + requiredObj, err := runtime.Decode(admissionCodecs.UniversalDecoder(admissionv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + + return requiredObj.(*admissionv1.ValidatingAdmissionPolicy) +} + +func ReadValidatingAdmissionPolicyBindingV1OrDie(objBytes []byte) *admissionv1.ValidatingAdmissionPolicyBinding { + requiredObj, err := runtime.Decode(admissionCodecs.UniversalDecoder(admissionv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + + return requiredObj.(*admissionv1.ValidatingAdmissionPolicyBinding) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go index 68201864ba58..50bfae945ee4 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go @@ -3,11 +3,12 @@ package v1helpers import ( "context" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - operatorv1 "github.com/openshift/api/operator/v1" applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + "github.com/openshift/library-go/pkg/apiserver/jsonpatch" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/cache" ) @@ -26,6 +27,8 @@ type OperatorClient interface { ApplyOperatorSpec(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorSpecApplyConfiguration) (err error) ApplyOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorStatusApplyConfiguration) (err error) + + PatchOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) } type StaticPodOperatorClient interface { @@ -43,6 +46,8 @@ type StaticPodOperatorClient interface { ApplyStaticPodOperatorSpec(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.StaticPodOperatorSpecApplyConfiguration) (err error) ApplyStaticPodOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.StaticPodOperatorStatusApplyConfiguration) (err error) + + PatchStaticOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) } type OperatorClientWithFinalizers interface { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go index 70d0251a9bc3..68694179af63 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go @@ -6,7 +6,6 @@ import ( "strconv" "time" - applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -19,6 +18,8 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" v1 "github.com/openshift/api/operator/v1" + applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + "github.com/openshift/library-go/pkg/apiserver/jsonpatch" ) // NewFakeSharedIndexInformer returns a fake shared index informer, suitable to use in static pod controller unit tests. 
@@ -84,7 +85,7 @@ func (fakeSharedIndexInformer) SetTransform(f cache.TransformFunc) error { func NewFakeStaticPodOperatorClient( staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, - triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) *fakeStaticPodOperatorClient { return &fakeStaticPodOperatorClient{ fakeStaticPodOperatorSpec: staticPodSpec, fakeStaticPodOperatorStatus: staticPodStatus, @@ -100,6 +101,8 @@ type fakeStaticPodOperatorClient struct { resourceVersion string triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error + + patchedOperatorStatus *jsonpatch.PatchSet } func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { @@ -164,13 +167,13 @@ func (c *fakeStaticPodOperatorClient) ApplyOperatorSpec(ctx context.Context, fie func (c *fakeStaticPodOperatorClient) ApplyOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorStatusApplyConfiguration) (err error) { if c.triggerStatusUpdateError != nil { - operatorStatus := &operatorv1.StaticPodOperatorStatus{OperatorStatus: *convertOperatorStatusApplyConfiguration(applyConfiguration)} + operatorStatus := &operatorv1.StaticPodOperatorStatus{OperatorStatus: *mergeOperatorStatusApplyConfiguration(&c.fakeStaticPodOperatorStatus.OperatorStatus, applyConfiguration)} if err := c.triggerStatusUpdateError("", operatorStatus); err != nil { return err } } c.fakeStaticPodOperatorStatus = &operatorv1.StaticPodOperatorStatus{ - OperatorStatus: *convertOperatorStatusApplyConfiguration(applyConfiguration), + OperatorStatus: *mergeOperatorStatusApplyConfiguration(&c.fakeStaticPodOperatorStatus.OperatorStatus, applyConfiguration), } return nil } @@ -180,10 +183,32 @@ func (c *fakeStaticPodOperatorClient) ApplyStaticPodOperatorSpec(ctx context.Con } func (c *fakeStaticPodOperatorClient) ApplyStaticPodOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.StaticPodOperatorStatusApplyConfiguration) (err error) { - c.fakeStaticPodOperatorStatus = convertStaticPodOperatorStatusApplyConfiguration(applyConfiguration) + if c.triggerStatusUpdateError != nil { + operatorStatus := mergeStaticPodOperatorStatusApplyConfiguration(&c.fakeStaticPodOperatorStatus.OperatorStatus, applyConfiguration) + if err := c.triggerStatusUpdateError("", operatorStatus); err != nil { + return err + } + } + c.fakeStaticPodOperatorStatus = mergeStaticPodOperatorStatusApplyConfiguration(&c.fakeStaticPodOperatorStatus.OperatorStatus, applyConfiguration) + return nil +} + +func (c *fakeStaticPodOperatorClient) PatchOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) { + return nil +} + +func (c *fakeStaticPodOperatorClient) PatchStaticOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) { + if c.triggerStatusUpdateError != nil { + return c.triggerStatusUpdateError("", nil) + } + c.patchedOperatorStatus = jsonPatch return nil } +func (c *fakeStaticPodOperatorClient) GetPatchedOperatorStatus() *jsonpatch.PatchSet { + return c.patchedOperatorStatus +} + func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, 
*operatorv1.OperatorStatus, string, error) { return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, c.resourceVersion, nil } @@ -239,11 +264,11 @@ func (n *fakeNodeLister) Get(name string) (*corev1.Node, error) { } // NewFakeOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. -func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { +func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) *fakeOperatorClient { return NewFakeOperatorClientWithObjectMeta(nil, spec, status, triggerErr) } -func NewFakeOperatorClientWithObjectMeta(meta *metav1.ObjectMeta, spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { +func NewFakeOperatorClientWithObjectMeta(meta *metav1.ObjectMeta, spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) *fakeOperatorClient { return &fakeOperatorClient{ fakeOperatorSpec: spec, fakeOperatorStatus: status, @@ -259,6 +284,8 @@ type fakeOperatorClient struct { fakeObjectMeta *metav1.ObjectMeta resourceVersion string triggerStatusUpdateError func(rv string, status *operatorv1.OperatorStatus) error + + patchedOperatorStatus *jsonpatch.PatchSet } func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer { @@ -317,10 +344,22 @@ func (c *fakeOperatorClient) ApplyOperatorSpec(ctx context.Context, fieldManager } func (c *fakeOperatorClient) ApplyOperatorStatus(ctx context.Context, fieldManager string, applyConfiguration *applyoperatorv1.OperatorStatusApplyConfiguration) (err error) { - c.fakeOperatorStatus = convertOperatorStatusApplyConfiguration(applyConfiguration) + c.fakeOperatorStatus = mergeOperatorStatusApplyConfiguration(c.fakeOperatorStatus, applyConfiguration) + return nil +} + +func (c *fakeOperatorClient) PatchOperatorStatus(ctx context.Context, jsonPatch *jsonpatch.PatchSet) (err error) { + if c.triggerStatusUpdateError != nil { + return c.triggerStatusUpdateError("", nil) + } + c.patchedOperatorStatus = jsonPatch return nil } +func (c *fakeOperatorClient) GetPatchedOperatorStatus() *jsonpatch.PatchSet { + return c.patchedOperatorStatus +} + func (c *fakeOperatorClient) EnsureFinalizer(ctx context.Context, finalizer string) error { if c.fakeObjectMeta == nil { c.fakeObjectMeta = &metav1.ObjectMeta{} @@ -350,12 +389,12 @@ func (c *fakeOperatorClient) SetObjectMeta(meta *metav1.ObjectMeta) { c.fakeObjectMeta = meta } -func convertOperatorStatusApplyConfiguration(applyConfiguration *applyoperatorv1.OperatorStatusApplyConfiguration) *v1.OperatorStatus { +func mergeOperatorStatusApplyConfiguration(currentOperatorStatus *v1.OperatorStatus, applyConfiguration *applyoperatorv1.OperatorStatusApplyConfiguration) *v1.OperatorStatus { status := &v1.OperatorStatus{ - ObservedGeneration: ptr.Deref(applyConfiguration.ObservedGeneration, 0), - Version: ptr.Deref(applyConfiguration.Version, ""), - ReadyReplicas: ptr.Deref(applyConfiguration.ReadyReplicas, 0), - LatestAvailableRevision: ptr.Deref(applyConfiguration.LatestAvailableRevision, 0), + ObservedGeneration: ptr.Deref(applyConfiguration.ObservedGeneration, currentOperatorStatus.ObservedGeneration), + Version: 
ptr.Deref(applyConfiguration.Version, currentOperatorStatus.Version), + ReadyReplicas: ptr.Deref(applyConfiguration.ReadyReplicas, currentOperatorStatus.ReadyReplicas), + LatestAvailableRevision: ptr.Deref(applyConfiguration.LatestAvailableRevision, currentOperatorStatus.LatestAvailableRevision), } for _, condition := range applyConfiguration.Conditions { @@ -367,6 +406,20 @@ func convertOperatorStatusApplyConfiguration(applyConfiguration *applyoperatorv1 } status.Conditions = append(status.Conditions, newCondition) } + var existingConditions []v1.OperatorCondition + for _, condition := range currentOperatorStatus.Conditions { + var foundCondition bool + for _, statusCondition := range status.Conditions { + if condition.Type == statusCondition.Type { + foundCondition = true + break + } + } + if !foundCondition { + existingConditions = append(existingConditions, condition) + } + } + status.Conditions = append(status.Conditions, existingConditions...) for _, generation := range applyConfiguration.Generations { newGeneration := operatorv1.GenerationStatus{ @@ -379,13 +432,27 @@ func convertOperatorStatusApplyConfiguration(applyConfiguration *applyoperatorv1 } status.Generations = append(status.Generations, newGeneration) } + var existingGenerations []v1.GenerationStatus + for _, generation := range currentOperatorStatus.Generations { + var foundGeneration bool + for _, statusGeneration := range status.Generations { + if generation.Namespace == statusGeneration.Namespace && generation.Name == statusGeneration.Name { + foundGeneration = true + break + } + } + if !foundGeneration { + existingGenerations = append(existingGenerations, generation) + } + } + status.Generations = append(status.Generations, existingGenerations...) return status } -func convertStaticPodOperatorStatusApplyConfiguration(applyConfiguration *applyoperatorv1.StaticPodOperatorStatusApplyConfiguration) *v1.StaticPodOperatorStatus { +func mergeStaticPodOperatorStatusApplyConfiguration(currentOperatorStatus *v1.OperatorStatus, applyConfiguration *applyoperatorv1.StaticPodOperatorStatusApplyConfiguration) *v1.StaticPodOperatorStatus { status := &v1.StaticPodOperatorStatus{ - OperatorStatus: *convertOperatorStatusApplyConfiguration(&applyConfiguration.OperatorStatusApplyConfiguration), + OperatorStatus: *mergeOperatorStatusApplyConfiguration(currentOperatorStatus, &applyConfiguration.OperatorStatusApplyConfiguration), } for _, nodeStatus := range applyConfiguration.NodeStatuses { diff --git a/vendor/github.com/openshift/library-go/pkg/route/common.go b/vendor/github.com/openshift/library-go/pkg/route/common.go index 988f149ca68b..bd378da01c61 100644 --- a/vendor/github.com/openshift/library-go/pkg/route/common.go +++ b/vendor/github.com/openshift/library-go/pkg/route/common.go @@ -2,14 +2,9 @@ package route import ( "context" - "fmt" authorizationv1 "k8s.io/api/authorization/v1" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/apiserver/pkg/endpoints/request" - routev1 "github.com/openshift/api/route/v1" - "github.com/openshift/library-go/pkg/authorization/authorizationutil" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -26,35 +21,3 @@ type RouteValidationOptions struct { // feature gate is enabled. 
 	AllowExternalCertificates bool
 }
-
-// CheckRouteCustomHostSAR checks if user has permission to create and update routes/custom-host
-// sub-resource
-func CheckRouteCustomHostSAR(ctx context.Context, fldPath *field.Path, sarc SubjectAccessReviewCreator) field.ErrorList {
-	user, ok := request.UserFrom(ctx)
-	if !ok {
-		return field.ErrorList{field.InternalError(fldPath, fmt.Errorf("unable to verify host field can be set"))}
-	}
-
-	var errs field.ErrorList
-	if err := authorizationutil.Authorize(sarc, user, &authorizationv1.ResourceAttributes{
-		Namespace:   request.NamespaceValue(ctx),
-		Verb:        "create",
-		Group:       routev1.GroupName,
-		Resource:    "routes",
-		Subresource: "custom-host",
-	}); err != nil {
-		errs = append(errs, field.Forbidden(fldPath, "user does not have create permission on custom-host"))
-	}
-
-	if err := authorizationutil.Authorize(sarc, user, &authorizationv1.ResourceAttributes{
-		Namespace:   request.NamespaceValue(ctx),
-		Verb:        "update",
-		Group:       routev1.GroupName,
-		Resource:    "routes",
-		Subresource: "custom-host",
-	}); err != nil {
-		errs = append(errs, field.Forbidden(fldPath, "user does not have update permission on custom-host"))
-	}
-
-	return errs
-}
diff --git a/vendor/github.com/openshift/library-go/pkg/route/hostassignment/assignment.go b/vendor/github.com/openshift/library-go/pkg/route/hostassignment/assignment.go
index d37bb9f6a09d..81cd5a656d1c 100644
--- a/vendor/github.com/openshift/library-go/pkg/route/hostassignment/assignment.go
+++ b/vendor/github.com/openshift/library-go/pkg/route/hostassignment/assignment.go
@@ -119,7 +119,7 @@ func hasCertificateInfo(tls *routev1.TLSConfig, opts route.RouteValidationOption
 }
 
 // certificateChangeRequiresAuth determines whether changes to the TLS certificate configuration require authentication.
-// Note: If either route uses externalCertificate, this function always returns true, as we cannot definitively verify if
+// Note: If the (newer/updated) route uses externalCertificate, this function always returns true, as we cannot definitively verify if
 // the content of the referenced secret has been modified. Even if the secret name remains the same,
 // we must assume that the secret content is changed, necessitating authorization.
 func certificateChangeRequiresAuth(route, older *routev1.Route, opts route.RouteValidationOptions) bool {
@@ -137,7 +137,7 @@ func certificateChangeRequiresAuth(route, older *routev1.Route, opts route.Route
 		a.Key != b.Key
 
 	if opts.AllowExternalCertificates {
-		if route.Spec.TLS.ExternalCertificate != nil || older.Spec.TLS.ExternalCertificate != nil {
+		if route.Spec.TLS.ExternalCertificate != nil {
 			certChanged = true
 		}
 	}
@@ -166,8 +166,17 @@ func validateImmutableField(newVal, oldVal interface{}, fldPath *field.Path, err
 // done to the route object. If the route's host/subdomain has been updated it checks if
 // the user has "update" permission on custom-host subresource. If only the certificate
 // has changed, it checks if the user has "create" permission on the custom-host subresource.
-// Caveat here is that if the route uses externalCertificate, the certChanged condition will
-// always be true since we cannot verify state of external secret object.
+//
+// This means that "update" permission is required to change the host/subdomain, and
+// either "create" or "update" permission is required to change the certificate.
+// Removing certificate info is allowed without any permission.
+// https://github.com/openshift/origin/pull/18177#issuecomment-360660024.
+//
+// The caveat here is that if the (newer/updated) route uses externalCertificate,
+// the certChanged condition will always be true (even when the secret name remains unchanged),
+// since we cannot verify the state of the external secret object.
+// Due to this, it proceeds with the assumption that the certificate has changed
+// when the route has externalCertificate set.
 func ValidateHostUpdate(ctx context.Context, route, older *routev1.Route, sarc route.SubjectAccessReviewCreator, opts route.RouteValidationOptions) field.ErrorList {
 	hostChanged := route.Spec.Host != older.Spec.Host
 	subdomainChanged := route.Spec.Subdomain != older.Spec.Subdomain
@@ -246,7 +255,9 @@ func ValidateHostUpdate(ctx context.Context, route, older *routev1.Route, sarc r
 		if route.Spec.TLS.ExternalCertificate == nil || older.Spec.TLS.ExternalCertificate == nil {
 			errs = append(errs, validateImmutableField(route.Spec.TLS.ExternalCertificate, older.Spec.TLS.ExternalCertificate, field.NewPath("spec", "tls", "externalCertificate"), routeTLSPermissionErrMsg)...)
 		} else {
-			errs = append(errs, validateImmutableField(route.Spec.TLS.ExternalCertificate.Name, older.Spec.TLS.ExternalCertificate.Name, field.NewPath("spec", "tls", "externalCertificate"), routeTLSPermissionErrMsg)...)
+			// since the state of the external secret cannot be verified, return an error (even when the secret name remains unchanged)
+			// without performing immutability checks, if externalCertificate is set.
+			errs = append(errs, field.Invalid(field.NewPath("spec", "tls", "externalCertificate"), route.Spec.TLS.ExternalCertificate, routeTLSPermissionErrMsg))
 		}
 	}
 	return errs
diff --git a/vendor/github.com/openshift/library-go/pkg/route/hostassignment/externalcertificate.go b/vendor/github.com/openshift/library-go/pkg/route/hostassignment/externalcertificate.go
deleted file mode 100644
index 15b0dfecc226..000000000000
--- a/vendor/github.com/openshift/library-go/pkg/route/hostassignment/externalcertificate.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package hostassignment
-
-import (
-	"context"
-
-	"k8s.io/apimachinery/pkg/util/validation/field"
-
-	routev1 "github.com/openshift/api/route/v1"
-	routecommon "github.com/openshift/library-go/pkg/route"
-)
-
-// ValidateHostExternalCertificate checks if the user has permissions to create and update
-// custom-host subresource of routes. This check is required to be done prior to ValidateHostUpdate()
-// since updating hosts while using externalCertificate is contingent on the user having both these
-// permissions. The ValidateHostUpdate() cannot differentiate if the certificate has changed since
-// now the certificates will be present as a secret object, due to this it proceeds with the assumption
-// that the certificate has changed when the route has externalCertificate set.
-// TODO: Consider merging this function into ValidateHostUpdate.
-func ValidateHostExternalCertificate(ctx context.Context, new, older *routev1.Route, sarc routecommon.SubjectAccessReviewCreator, opts routecommon.RouteValidationOptions) field.ErrorList {
-
-	if !opts.AllowExternalCertificates {
-		// Return nil since the feature gate is off.
-		// ValidateHostUpdate() is sufficient to validate
-		// permissions.
- return nil - } - - newTLS := new.Spec.TLS - oldTLS := older.Spec.TLS - if (newTLS != nil && newTLS.ExternalCertificate != nil) || (oldTLS != nil && oldTLS.ExternalCertificate != nil) { - return routecommon.CheckRouteCustomHostSAR(ctx, field.NewPath("spec", "tls", "externalCertificate"), sarc) - } - - return nil -} diff --git a/vendor/github.com/openshift/library-go/pkg/route/validation/validation.go b/vendor/github.com/openshift/library-go/pkg/route/validation/validation.go index 02466f83f1cf..a3896006f954 100644 --- a/vendor/github.com/openshift/library-go/pkg/route/validation/validation.go +++ b/vendor/github.com/openshift/library-go/pkg/route/validation/validation.go @@ -313,9 +313,7 @@ func validateTLS(ctx context.Context, route *routev1.Route, fldPath *field.Path, // using externalCertificate. Called by validateTLS. func validateTLSExternalCertificate(ctx context.Context, route *routev1.Route, fldPath *field.Path, sarc routecommon.SubjectAccessReviewCreator, secretsGetter corev1client.SecretsGetter) field.ErrorList { tls := route.Spec.TLS - - // user must have create and update permission on the custom-host sub-resource. - errs := routecommon.CheckRouteCustomHostSAR(ctx, fldPath, sarc) + var errs field.ErrorList // The router serviceaccount must have permission to get/list/watch the referenced secret. // The role and rolebinding to provide this access must be provided by the user. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5eafe..2a7cf70da6e4 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 636edb460a43..aa69fb4d509f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -719,6 +719,8 @@ type PythonSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. 
type NodeSettings struct { state protoimpl.MessageState @@ -1024,6 +1033,13 @@ type MethodSettings struct { // The fully qualified name of the method, for which the options below apply. // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Describes settings to use for long-running operations when generating // API methods for RPCs. Complements RPCs that use the annotations in @@ -1033,15 +1049,12 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.cloud.speech.v2.Speech.BatchRecognize - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` // List of top-level fields of the request message, that should be // automatically populated by the client libraries based on their @@ -1051,9 +1064,9 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.example.v1.ExampleService.CreateExample - // auto_populated_fields: - // - request_id + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } @@ -1110,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. 
+ RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. // All default values below are from those used in the client library @@ -1138,7 +1205,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1151,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1456,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 
0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 
0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 
0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 
0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1597,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - 
(*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1641,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 16: google.api.PhpSettings.common:type_name -> 
google.api.CommonLanguageSettings 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1812,7 +1892,19 @@ func 
file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1831,7 +1923,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 17, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index d339dfb02ac1..a462e7d01327 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -121,6 +121,11 @@ type FieldInfo struct { // any API consumer, just documents the API's format for the field it is // applied to. Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` } func (x *FieldInfo) Reset() { @@ -162,6 +167,70 @@ func (x *FieldInfo) GetFormat() FieldInfo_Format { return FieldInfo_FORMAT_UNSPECIFIED } +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. 
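As context for the new `referenced_types` annotation and `TypeReference` message added in this hunk: the values can be read back off a compiled descriptor with standard protobuf reflection. A minimal sketch follows; it is not part of this patch, and the `referencedTypeNames` helper is an invented name.

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// referencedTypeNames pulls the type_name values out of a field's
// (google.api.field_info) extension, returning nil when the annotation
// is absent. Invented for this sketch; not an API in this package.
func referencedTypeNames(fd protoreflect.FieldDescriptor) []string {
	if fd == nil {
		return nil
	}
	opts, ok := fd.Options().(*descriptorpb.FieldOptions)
	if !ok || !proto.HasExtension(opts, annotations.E_FieldInfo) {
		return nil
	}
	info := proto.GetExtension(opts, annotations.E_FieldInfo).(*annotations.FieldInfo)
	names := make([]string, 0, len(info.GetReferencedTypes()))
	for _, tr := range info.GetReferencedTypes() {
		names = append(names, tr.GetTypeName())
	}
	return names
}

func main() {
	// With a real descriptor in hand this would print the documented
	// examples, e.g. ["ActualType" "OtherType"], or ["*"] for wildcard
	// user input.
	fmt.Println(referencedTypeNames(nil))
}
```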
+ TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. +func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FieldOptions)(nil), @@ -185,6 +254,13 @@ var ( // string actual_ip_address = 4 [ // (google.api.field_info).format = IPV4_OR_IPV6 // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; // // optional google.api.FieldInfo field_info = 291403980; E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] @@ -197,30 +273,37 @@ var file_google_api_field_info_proto_rawDesc = []byte{ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, - 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, - 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, - 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -236,21 
+319,23 @@ func file_google_api_field_info_proto_rawDescGZIP() []byte { } var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_google_api_field_info_proto_goTypes = []interface{}{ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format (*FieldInfo)(nil), // 1: google.api.FieldInfo - (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions } var file_google_api_field_info_proto_depIdxs = []int32{ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format - 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions - 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_api_field_info_proto_init() } @@ -271,6 +356,18 @@ func file_google_api_field_info_proto_init() { return nil } } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -278,7 +375,7 @@ func file_google_api_field_info_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_field_info_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 2, NumExtensions: 1, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 76ea76df330f..ffb5838cb18b 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -102,7 +102,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { return false } -// # gRPC Transcoding +// gRPC Transcoding // // gRPC Transcoding is a feature for mapping between a gRPC method and one or // more HTTP REST endpoints. 
It allows developers to build a single API service @@ -143,9 +143,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables an HTTP REST to gRPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` // // Any fields in the request message which are not bound by the path template // automatically become HTTP query parameters if there is no HTTP request body. @@ -169,11 +168,9 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables a HTTP JSON to RPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` // // Note that fields which are mapped to URL query parameters must have a // primitive type or a repeated primitive type or a non-repeated message type. @@ -203,10 +200,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // representation of the JSON in the request body is determined by // protos JSON encoding: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -228,10 +223,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // The following HTTP JSON to RPC mapping is enabled: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in @@ -259,13 +252,13 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables the following two alternative HTTP JSON to RPC mappings: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` // -// ## Rules for HTTP mapping +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping // // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: @@ -284,7 +277,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body, all // fields are passed via URL path and URL query parameters. // -// ### Path template syntax +// Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -323,7 +316,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // Document](https://developers.google.com/discovery/v1/reference/apis) as // `{+var}`. 
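The mapping examples in the comment above correspond to `HttpRule` values from this same package. A minimal hand-built sketch of the first mapping (illustrative only; in practice rules come from a `(google.api.http)` option or the service config, and the selector here is the example service name used later in this doc comment):

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	// Mirrors the first mapping documented above:
	//   GET /v1/messages/123456  ->  GetMessage(name: "messages/123456")
	rule := &annotations.HttpRule{
		Selector: "example.v1.Messaging.GetMessage",
		Pattern:  &annotations.HttpRule_Get{Get: "/v1/{name=messages/*}"},
	}
	fmt.Printf("%s -> %s\n", rule.GetGet(), rule.GetSelector())
}
```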
// -// ## Using gRPC API Service Configuration +// # Using gRPC API Service Configuration // // gRPC API Service Configuration (service config) is a configuration language // for configuring a gRPC service to become a user-facing product. The @@ -338,15 +331,14 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specified in the service config will override any matching transcoding // configuration in the proto. // -// Example: +// The following example selects a gRPC method and applies an `HttpRule` to it: // // http: // rules: -// # Selects a gRPC method and applies HttpRule to it. // - selector: example.v1.Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // -// ## Special notes +// # Special notes // // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the // proto to JSON conversion must follow the [proto3 diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 7a3fd93fcd96..b5db279aebf6 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -253,8 +253,13 @@ type ResourceDescriptor struct { History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec + // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception + // to this is for Nested Collections that have stuttering names, as defined + // in [AIP-122](https://google.aip.dev/122#nested-collections), where the + // collection ID in the resource name pattern does not necessarily directly + // match the `plural` value. + // + // It is the same concept of the `plural` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ // // Note: The plural form is required even for singleton resources. See diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go index 6b867a46ede9..c90c6015d26c 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go @@ -1105,25 +1105,66 @@ func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { // messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the // macro tests whether the property is set to its default. For map and struct // types, the macro tests whether the property `x` is defined on `m`. 
+// +// Comprehensions for the standard environment macros evaluation can be best +// visualized as the following pseudocode: +// +// ``` +// let `accu_var` = `accu_init` +// +// for (let `iter_var` in `iter_range`) { +// if (!`loop_condition`) { +// break +// } +// `accu_var` = `loop_step` +// } +// +// return `result` +// ``` +// +// Comprehensions for the optional V2 macros which support map-to-map +// translation differ slightly from the standard environment macros in that +// they expose both the key or index in addition to the value for each list +// or map entry: +// +// ``` +// let `accu_var` = `accu_init` +// +// for (let `iter_var`, `iter_var2` in `iter_range`) { +// if (!`loop_condition`) { +// break +// } +// `accu_var` = `loop_step` +// } +// +// return `result` +// ``` type Expr_Comprehension struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The name of the iteration variable. + // The name of the first iteration variable. + // When the iter_range is a list, this variable is the list element. + // When the iter_range is a map, this variable is the map entry key. IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` - // The range over which var iterates. + // The name of the second iteration variable, empty if not set. + // When the iter_range is a list, this variable is the integer index. + // When the iter_range is a map, this variable is the map entry value. + // This field is only set for comprehension v2 macros. + IterVar2 string `protobuf:"bytes,8,opt,name=iter_var2,json=iterVar2,proto3" json:"iter_var2,omitempty"` + // The range over which the comprehension iterates. IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` // The name of the variable used for accumulation of the result. AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` // The initial value of the accumulator. AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` - // An expression which can contain iter_var and accu_var. + // An expression which can contain iter_var, iter_var2, and accu_var. // // Returns false when the result has been computed and may be used as // a hint to short-circuit the remainder of the comprehension. LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` - // An expression which can contain iter_var and accu_var. + // An expression which can contain iter_var, iter_var2, and accu_var. // // Computes the next value of accu_var. 
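To show how the new `iter_var2` field slots into the message, here is a hand-assembled comprehension node for a v2-style map iteration. This is a sketch only: real nodes are emitted by a CEL parser, and the `ident` helper, the placeholder variable names, and the id numbering are all invented.

```go
package main

import (
	"fmt"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// ident builds an identifier node; a tiny invented helper for the sketch.
func ident(id int64, name string) *exprpb.Expr {
	return &exprpb.Expr{
		Id:       id,
		ExprKind: &exprpb.Expr_IdentExpr{IdentExpr: &exprpb.Expr_Ident{Name: name}},
	}
}

func main() {
	// A v2-style comprehension over a map `m`, binding the entry key to
	// iter_var and the entry value to the new iter_var2. The loop body
	// placeholders stand in for whatever a macro would expand to.
	comp := &exprpb.Expr{
		Id: 1,
		ExprKind: &exprpb.Expr_ComprehensionExpr{
			ComprehensionExpr: &exprpb.Expr_Comprehension{
				IterVar:   "k", // map entry key
				IterVar2:  "v", // map entry value; empty for v1 macros
				IterRange: ident(2, "m"),
				AccuVar:   "__result__",
				AccuInit: &exprpb.Expr{Id: 3, ExprKind: &exprpb.Expr_ConstExpr{
					ConstExpr: &exprpb.Constant{
						ConstantKind: &exprpb.Constant_BoolValue{BoolValue: true},
					},
				}},
				LoopCondition: ident(4, "__result__"),
				LoopStep:      ident(5, "__step__"),
				Result:        ident(6, "__result__"),
			},
		},
	}
	fmt.Println(comp.GetComprehensionExpr().GetIterVar2()) // "v"
}
```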
LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` @@ -1172,6 +1213,13 @@ func (x *Expr_Comprehension) GetIterVar() string { return "" } +func (x *Expr_Comprehension) GetIterVar2() string { + if x != nil { + return x.IterVar2 + } + return "" +} + func (x *Expr_Comprehension) GetIterRange() *Expr { if x != nil { return x.IterRange @@ -1485,7 +1533,7 @@ var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xae, 0x0d, 0x0a, 0x04, 0x45, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xcb, 0x0d, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -1567,132 +1615,134 @@ var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xfd, - 0x02, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0x9a, + 0x03, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x3d, 0x0a, 0x0a, 0x69, - 0x74, 0x65, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, - 0x09, 0x69, 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, - 0x63, 0x75, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, - 0x63, 0x75, 0x56, 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, - 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, - 0x69, 0x74, 0x12, 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 
0x0a, 0x09, 0x6c, 0x6f, 0x6f, - 0x70, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, + 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x32, 0x12, 0x3d, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, - 0x6f, 0x70, 0x53, 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, - 0x0a, 0x09, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, - 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, - 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, - 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, - 0x00, 0x52, 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, - 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, - 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, - 0x18, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 
0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x8c, 0x07, 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, - 0x0a, 0x0e, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, 0x74, + 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, 0x5f, + 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, 0x56, + 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, 0x12, + 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, - 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, - 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, - 0x03, 0x0a, 0x09, 
0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, - 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x73, + 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, - 0x65, 0x6e, 0x74, 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, - 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, - 0x22, 0x6f, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, - 0x15, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, - 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, - 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, - 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x1a, 0x3c, 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x5d, 0x0a, 0x0f, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, 0x53, + 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, - 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x42, 0x6e, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, + 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, 0x48, + 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x8c, 0x07, 0x0a, + 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, 0x03, 0x0a, 0x09, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, 0x61, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x42, 0x0b, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, + 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, + 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, + 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, + 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, + 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x1a, 0x3c, + 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, 0x0a, 0x0f, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, 0x0a, 0x0e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 
0x75, 0x6d, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0x6e, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x53, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a07781ae3..5d4096d46a04 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. -- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e108705c4..abab279379ba 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). 
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50ce9..d7b40b7cb66f 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // This should be useful for callers who want to configure backoff with diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f391744f7299..b181f386a1ba 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -72,8 +73,21 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -243,6 +257,10 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on the + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -410,6 +428,9 @@ type SubConnState struct { // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. Otherwise, it is nil. ConnectionError error + // connectedAddress contains the connected address when ConnectivityState is // Ready. Otherwise, it is indeterminate.
+ connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8e6a..2b87bd79c757 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 07527603f1d4..4d69b4052f8e 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -50,7 +50,7 @@ const ( type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. addrs = state.ResolverState.Addresses diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 4161fdf47a8b..8ad6ce2f0950 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -25,12 +25,15 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // ccBalancerWrapper sits between the ClientConn and the Balancer. 
// // ccBalancerWrapper implements methods corresponding to the ones on the @@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { // it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { errCh := make(chan error) - ok := ccb.serializer.Schedule(func(ctx context.Context) { + uccs := func(ctx context.Context) { defer close(errCh) if ctx.Err() != nil || ccb.balancer == nil { return @@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat logger.Infof("error from balancer.UpdateClientConnState: %v", err) } errCh <- err - }) - if !ok { - return nil } + onFailure := func() { close(errCh) } + + // UpdateClientConnState can race with Close, and when the latter wins, the + // serializer is closed, and the attempt to schedule the callback will fail. + // It is acceptable to ignore this failure. But since we want to handle the + // state update in a blocking fashion (when we successfully schedule the + // callback), we have to use the ScheduleOr method and not the MaybeSchedule + // method on the serializer. + ccb.serializer.ScheduleOr(uccs, onFailure) return <-errCh } // resolverError is invoked by grpc to push a resolver error to the underlying // balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { + ccb.serializer.TrySchedule(func(context.Context) { if ccb.balancer == nil { return } @@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -182,7 +192,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -252,15 +262,29 @@ type acBalancerWrapper struct { // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. 
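		// [Editor's aside, not part of the patch: the serializer used
		// throughout this file is the internal grpcsync.CallbackSerializer.
		// A minimal sketch of its contract, inferred from the call sites in
		// this file rather than from upstream documentation:
		//
		//	s := grpcsync.NewCallbackSerializer(ctx)
		//	// Best effort: the callback is silently dropped if the
		//	// serializer has already been closed.
		//	s.TrySchedule(func(ctx context.Context) { /* work */ })
		//	// Guaranteed hand-off: runs the callback, or onFailure when it
		//	// can no longer be scheduled, so a caller blocked on a result
		//	// channel (like updateClientConnState above) always unblocks.
		//	s.ScheduleOr(func(ctx context.Context) { /* work */ }, func() { /* onFailure */ })
		//
		// End of aside.]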
// TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } }) } @@ -318,8 +342,8 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 63c639e4fe93..55bffaa77ef0 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v 
interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 423be7b43b00..9c8850e3fdd5 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -39,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -590,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). - authority string // See initAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). 
- idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -626,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. In both cases, we will clear the balancer attributes by -// calling this function, and therefore we will be able to use the Equal method -// provided by the resolver.Address type for comparison. -func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { +// Makes a copy of the input addresses slice. Addresses are passed during +// subconn creation and address update operations. +func copyAddresses(in []resolver.Address) []resolver.Address { out := make([]resolver.Address, len(in)) - for i := range in { - out[i] = in[i] - out[i].BalancerAttributes = nil - } + copy(out, in) return out } @@ -835,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. 
} ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddressesWithoutBalancerAttributes(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), + stateReadyChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -918,28 +908,29 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - ac.mu.Unlock() - ac.resetTransport() + ac.resetTransportAndUnlock() return nil } -func equalAddresses(a, b []resolver.Address) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if !v.Equal(b[i]) { - return false - } - } - return true +// equalAddressIgnoringBalAttributes returns true is a and b are considered equal. +// This is different from the Equal method on the resolver.Address type which +// considers all fields to determine equality. Here, we only consider fields +// that are meaningful to the subConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} + +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { + return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) } // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - addrs = copyAddressesWithoutBalancerAttributes(addrs) + addrs = copyAddresses(addrs) limit := len(addrs) if limit > 5 { limit = 5 @@ -947,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) ac.mu.Lock() - if equalAddresses(ac.addrs, addrs) { + if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { ac.mu.Unlock() return } @@ -966,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // Try to find the connected address. for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -992,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } - ac.mu.Unlock() - // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1190,8 +1179,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. 
resetBackoff chan struct{} @@ -1204,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1214,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1231,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. +func (ac *addrConn) resetTransportAndUnlock() { acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1522,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { for ctx.Err() == nil { ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan + t, state, sc := ac.transport, ac.state, ac.stateReadyChan ac.mu.Unlock() if state == connectivity.Ready { return t, nil @@ -1585,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. + // closing of transports is also taken care of by cancellation of cc.ctx. // But in the case where the channel is entering idle, we need to // explicitly close the transports here. Instead of distinguishing // between these two cases, it is simpler to close the transport diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 411e3dfd47cc..e840858b77b1 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -21,18 +21,73 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" ) -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. +// baseCodec captures the new encoding.CodecV2 interface without the Name +// function, allowing it to be implemented by older Codec and encoding.Codec +// implementations. The omitted Name function is only needed for the register in +// the encoding package and is not part of the core functionality. type baseCodec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error + Marshal(v any) (mem.BufferSlice, error) + Unmarshal(data mem.BufferSlice, v any) error +} + +// getCodec returns an encoding.CodecV2 for the codec of the given name (if +// registered). Initially checks the V2 registry with encoding.GetCodecV2 and +// returns the V2 codec if it is registered. 
Otherwise, it checks the V1 registry +// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge +// to turn it into an encoding.CodecV2. Returns nil otherwise. +func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443bfe..4c805c64462c 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index f5453d48a53f..2b285beee376 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) @@ -60,7 +61,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -92,7 +93,6 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string maxCallAttempts int } @@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. 
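//
// As an illustrative sketch only (the durations below are assumptions, not
// recommended settings), a client might enable keepalive like so, assuming
// the usual "google.golang.org/grpc/keepalive" and
// "google.golang.org/grpc/credentials/insecure" imports:
//
//	conn, err := grpc.NewClient(target,
//		grpc.WithTransportCredentials(insecure.NewCredentials()),
//		grpc.WithKeepaliveParams(keepalive.ClientParameters{
//			Time:                30 * time.Second, // ping after 30s without activity
//			Timeout:             10 * time.Second, // wait 10s for the ping ack
//			PermitWithoutStream: true,             // also ping when no RPCs are active
//		}))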
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, } @@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859ad746..e7b532b6f806 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d7147f..11d0ae142c42 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 000000000000..074c5e234a7b --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66d5cdf03ec5..ceec319dd2fb 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
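Editor's note on the CodecV2 API added above (a hedged sketch, not part of the
patch): a third-party codec can implement encoding.CodecV2 and register itself
at init time. The JSON codec below is hypothetical; only RegisterCodecV2,
mem.BufferSlice, mem.SliceBuffer and BufferSlice.Materialize are taken from
the vendored code.

	import (
		"encoding/json"

		"google.golang.org/grpc/encoding"
		"google.golang.org/grpc/mem"
	)

	type jsonCodecV2 struct{}

	func (jsonCodecV2) Marshal(v any) (mem.BufferSlice, error) {
		b, err := json.Marshal(v)
		if err != nil {
			return nil, err
		}
		// Wrap the plain []byte in a single-buffer slice; SliceBuffer is
		// not pooled, so no explicit free is required.
		return mem.BufferSlice{mem.SliceBuffer(b)}, nil
	}

	func (jsonCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
		// Materialize flattens the (possibly multi-buffer) slice into one
		// contiguous []byte, which is all encoding/json can consume.
		return json.Unmarshal(data.Materialize(), v)
	}

	func (jsonCodecV2) Name() string { return "json" }

	func init() { encoding.RegisterCodecV2(jsonCodecV2{}) }

A client would then select such a codec per call with
grpc.CallContentSubtype("json"). End of note.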
@@ -24,6 +24,7 @@ import ( "fmt" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" ) @@ -32,28 +33,51 @@ import ( const Name = "proto" func init() { - encoding.RegisterCodec(codec{}) + encoding.RegisterCodecV2(&codecV2{}) } -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} +// codec is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. +type codecV2 struct{} -func (codec) Marshal(v any) ([]byte, error) { +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { vv := messageV2Of(v) if vv == nil { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil } -func (codec) Unmarshal(data []byte, v any) error { +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { vv := messageV2Of(v) if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - return proto.Unmarshal(data, vv) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) } func messageV2Of(v any) proto.Message { @@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message { return nil } -func (codec) Name() string { +func (c *codecV2) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 000000000000..1d827dd5d9d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. 
+var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. entries, seconds) of this metric. + Unit string + // The required label keys for this metric. These are intended to + // metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for a int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. +func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Count(h, incr, labels...) +} + +// Int64HistoHandle is a typed handle for an int histogram metric. This handle +// is passed at the recording point in order to know which metric to record on. +type Int64HistoHandle MetricDescriptor + +// Descriptor returns the int64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Histo(h, incr, labels...) +} + +// Float64HistoHandle is a typed handle for a float histogram metric. This +// handle is passed at the recording point in order to know which metric to +// record on. 
+type Float64HistoHandle MetricDescriptor + +// Descriptor returns the float64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 histo value on the metrics recorder provided. +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Histo(h, incr, labels...) +} + +// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Int64GaugeHandle MetricDescriptor + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Gauge(h, incr, labels...) +} + +// registeredMetrics are the registered metric descriptor names. +var registeredMetrics = make(map[Metric]bool) + +// metricsRegistry contains all of the registered metrics. +// +// This is written to only at init time, and read only after that. +var metricsRegistry = make(map[Metric]*MetricDescriptor) + +// DescriptorForMetric returns the MetricDescriptor from the global registry. +// +// Returns nil if MetricDescriptor not present. +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) + } +} + +// RegisterInt64Count registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64CountHandle)(descPtr) +} + +// RegisterFloat64Count registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64CountHandle)(descPtr) +} + +// RegisterInt64Histo registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. 
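//
// As a hedged illustration of the registration pattern shared by all of
// these Register* helpers (the metric name and label below are assumptions,
// not metrics defined anywhere in this patch):
//
//	var exampleCount = RegisterInt64Count(MetricDescriptor{
//		Name:        "grpc.example.operations",
//		Description: "Number of example operations performed.",
//		Unit:        "operation",
//		Labels:      []string{"grpc.target"},
//		Default:     false,
//	})
//
//	// At a recording point, with recorder being e.g. the MetricsRecorder a
//	// balancer receives via balancer.BuildOptions:
//	exampleCount.Record(recorder, 1, "dns:///example:443")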
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64HistoHandle)(descPtr) +} + +// RegisterFloat64Histo registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64HistoHandle)(descPtr) +} + +// RegisterInt64Gauge registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntGauge + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64GaugeHandle)(descPtr) +} + +// snapshotMetricsRegistryForTesting snapshots the global data of the metrics +// registry. Returns a cleanup function that sets the metrics registry to its +// original state. +func snapshotMetricsRegistryForTesting() func() { + oldDefaultMetrics := DefaultMetrics + oldRegisteredMetrics := registeredMetrics + oldMetricsRegistry := metricsRegistry + + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) + maps.Copy(registeredMetrics, registeredMetrics) + maps.Copy(metricsRegistry, metricsRegistry) + + return func() { + DefaultMetrics = oldDefaultMetrics + registeredMetrics = oldRegisteredMetrics + metricsRegistry = oldMetricsRegistry + } +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go new file mode 100644 index 000000000000..3221f7a633a3 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats contains experimental metrics/stats API's. +package stats + +import "maps" + +// MetricsRecorder records on metrics derived from metric registry. +type MetricsRecorder interface { + // RecordInt64Count records the measurement alongside labels on the int + // count associated with the provided handle. 
+ RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) + // RecordFloat64Count records the measurement alongside labels on the float + // count associated with the provided handle. + RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) + // RecordInt64Histo records the measurement alongside labels on the int + // histo associated with the provided handle. + RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string) + // RecordFloat64Histo records the measurement alongside labels on the float + // histo associated with the provided handle. + RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string) + // RecordInt64Gauge records the measurement alongside labels on the int + // gauge associated with the provided handle. + RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) +} + +// Metric is an identifier for a metric. +type Metric string + +// Metrics is a set of metrics to record. Once created, Metrics is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetrics instead. +type Metrics struct { + // metrics are the set of metrics to initialize. + metrics map[Metric]bool +} + +// NewMetrics returns a Metrics containing Metrics. +func NewMetrics(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *Metrics) Metrics() map[Metric]bool { + return m.metrics +} + +// Add adds the metrics to the metrics set and returns a new copy with the +// additional metrics. +func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. +func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index ac73c9ced255..f1ae080dcb81 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -20,8 +20,6 @@ package grpclog import ( "fmt" - - "google.golang.org/grpc/internal/grpclog" ) // componentData records the settings for a component. @@ -33,22 +31,22 @@ var cache = map[string]*componentData{} func (c *componentData) InfoDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.InfoDepth(depth+1, args...) + InfoDepth(depth+1, args...) 
} func (c *componentData) WarningDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.WarningDepth(depth+1, args...) + WarningDepth(depth+1, args...) } func (c *componentData) ErrorDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.ErrorDepth(depth+1, args...) + ErrorDepth(depth+1, args...) } func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) + FatalDepth(depth+1, args...) } func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9cb993..db320105e64e 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. 
Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 000000000000..59c03bc14c2a --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 000000000000..e524fdd40b23 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) 
+} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go similarity index 52% rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index bfc45102ab24..07df71e98a87 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,59 +16,17 @@ * */ -// Package grpclog (internal) defines depth logging for grpc. -package grpclog +package internal import ( + "encoding/json" + "fmt" + "io" + "log" "os" ) -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - // LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...any) @@ -107,14 +65,13 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. 
+ warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d8267ca..4b203585707a 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. 
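// ----------------------------------------------------------------------------
// [Editorial example — not part of the vendored diff] A sketch of driving the
// relocated constructor from inside the grpc module (internal packages do not
// compile elsewhere); the package name, writers, and config values here are
// illustrative assumptions.
package sketch

import (
	"os"

	"google.golang.org/grpc/grpclog/internal"
)

func newJSONLogger() internal.LoggerV2 {
	// Per the writer fan-out above: warnings also reach infoW, and
	// errors/fatals reach all three writers.
	return internal.NewLoggerV2(os.Stdout, os.Stderr, os.Stderr, internal.LoggerV2Config{
		Verbosity:  2,    // V(l) reports true for l <= 2
		FormatJSON: true, // {"severity":...,"message":...} lines, standard log flags disabled
	})
}
// ----------------------------------------------------------------------------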
-type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d71303..892dc13d164b 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. 
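// ----------------------------------------------------------------------------
// [Editorial example — not part of the vendored diff] The deprecated Logger
// interface matches the method set of the standard library's *log.Logger
// (Fatal*, Print*), so legacy code can still install one directly;
// LoggerWrapper, now defined in grpclog/internal, maps Info/Warning/Error
// calls onto Print* and reports every verbosity level as enabled.
package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
	grpclog.Warning("routed through LoggerWrapper to (*log.Logger).Print")
}
// ----------------------------------------------------------------------------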
- // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. 
- b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 38b883507350..d92335445f65 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckRequest); i { case 0: return &v.state @@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() { return nil } } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckResponse); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 51b736ba06e5..f96b8ab4927e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -73,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) } type healthClient struct { @@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &healthWatchClient{ClientStream: stream} + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer -// for forward compatibility +// for forward compatibility. // // Health is gRPC's mechanism for checking whether a server is able to handle // RPCs. Its semantics are documented in @@ -160,19 +146,23 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error } -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } +func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HealthServer will @@ -182,6 +172,13 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Health_ServiceDesc, srv) } @@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) } -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] // Health_ServiceDesc is the grpc.ServiceDesc for Health service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index cce6312d77f9..d4b4b7081590 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -51,7 +51,7 @@ func NewServer() *Server { } // Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { +func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.RLock() defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a871df..9669328914ad 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b08925d..64c791953d01 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. 
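// ----------------------------------------------------------------------------
// [Editorial example — not part of the vendored diff] A sketch of a
// hand-written Health service against the regenerated stubs above. Watch now
// takes the generic grpc.ServerStreamingServer[HealthCheckResponse]
// (Health_WatchServer is only an alias), and UnimplementedHealthServer must be
// embedded by value, which RegisterHealthServer now verifies at registration
// time. Type and function names are illustrative; the ready-made
// implementation in google.golang.org/grpc/health is usually preferable.
package main

import (
	"context"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

type healthSvc struct {
	healthpb.UnimplementedHealthServer // by value: pointer embedding trips the registration check
}

func (healthSvc) Check(context.Context, *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}

func (healthSvc) Watch(_ *healthpb.HealthCheckRequest, stream grpc.ServerStreamingServer[healthpb.HealthCheckResponse]) error {
	// Send one status update, then hold the stream open until the client
	// goes away; a real implementation would resend on status changes.
	if err := stream.Send(&healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}); err != nil {
		return err
	}
	<-stream.Context().Done()
	return stream.Context().Err()
}

func main() {
	s := grpc.NewServer()
	healthpb.RegisterHealthServer(s, healthSvc{})
	_ = s // listener setup and Serve elided
}
// ----------------------------------------------------------------------------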
@@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e1507aa..078bb81238bc 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6a518..0e6e18e185c7 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index d90648713944..452985f8d8f1 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -45,7 +45,11 @@ var ( // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". - EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e1731c..7617be215895 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. 
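// ----------------------------------------------------------------------------
// [Editorial note — not part of the vendored diff] The two env knobs above are
// read once at package-init time through boolFromEnv, so they must be present
// in the process environment before the program starts; matching appears to be
// a case-insensitive comparison against "true"/"false". An illustrative
// invocation (binary name assumed):
//
//	GRPC_ENFORCE_ALPN_ENABLED=false GRPC_EXPERIMENTAL_XDS_FALLBACK=true ./my-server
//
// Note the flipped default: ALPN enforcement is now on unless explicitly
// disabled.
// ----------------------------------------------------------------------------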
- WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de7632..092ad187a2c8 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. 
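// ----------------------------------------------------------------------------
// [Editorial example — not part of the vendored diff] Sketch of the behavior
// change in this internal type: V is now answered by the wrapped logger itself
// (or true on a nil receiver) instead of consulting the old global Logger, and
// the unused Debugf helper is gone. Illustrative use inside the grpc module:
package sketch

import (
	"google.golang.org/grpc/grpclog"
	igrpclog "google.golang.org/grpc/internal/grpclog"
)

func logReset(dl grpclog.DepthLoggerV2) {
	pl := igrpclog.NewPrefixLogger(dl, "[transport] ")
	// V now delegates to dl rather than to a package-level logger.
	if pl.V(2) {
		pl.Infof("stream %d reset by peer", 7)
	}
}
// ----------------------------------------------------------------------------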
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
 	return &PrefixLogger{logger: logger, prefix: prefix}
 }
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index f7f40a16acee..19b9d639275a 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
 	return cs
 }
 
-// Schedule adds a callback to be scheduled after existing callbacks are run.
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
 //
 // Callbacks are expected to honor the context when performing any blocking
 // operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+	cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
 //
-// Return value indicates if the callback was successfully added to the list of
-// callbacks to be executed by the serializer. It is not possible to add
-// callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
-	return cs.callbacks.Put(f) == nil
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+	if cs.callbacks.Put(f) != nil {
+		onFailure()
+	}
 }
 
 func (cs *CallbackSerializer) run(ctx context.Context) {
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
index aef8cec1ab0c..6d8c2f518dff 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
 
 	if ps.msg != nil {
 		msg := ps.msg
-		ps.cs.Schedule(func(context.Context) {
+		ps.cs.TrySchedule(func(context.Context) {
 			ps.mu.Lock()
 			defer ps.mu.Unlock()
 			if !ps.subscribers[sub] {
@@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) {
 	ps.msg = msg
 	for sub := range ps.subscribers {
 		s := sub
-		ps.cs.Schedule(func(context.Context) {
+		ps.cs.TrySchedule(func(context.Context) {
 			ps.mu.Lock()
 			defer ps.mu.Unlock()
 			if !ps.subscribers[s] {
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 5d6653986923..7aae9240ffc0 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -183,7 +183,7 @@ var (
 
 	// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
 	// metadata to RPCs.
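// ----------------------------------------------------------------------------
// [Editorial example — not part of the vendored diff] The boolean-returning
// Schedule above is split into two explicit entry points. A sketch of the
// intended call sites (internal API, compiles only inside the grpc module;
// names are illustrative):
package sketch

import (
	"context"

	"google.golang.org/grpc/internal/grpcsync"
)

func start(ctx context.Context) {
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Best effort: silently dropped if ctx was already canceled when
	// TrySchedule is called.
	cs.TrySchedule(func(ctx context.Context) {
		// Runs in FIFO order on the serializer's goroutine.
	})

	// Explicit failure path: onFailure runs inline when the callback can no
	// longer be scheduled.
	cs.ScheduleOr(func(ctx context.Context) {
		// Normal path.
	}, func() {
		// Serializer shutting down; release resources here.
	})
}
// ----------------------------------------------------------------------------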
- GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -203,11 +203,31 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false + UserSetDefaultScheme = false // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n // is the number of elements. swap swaps the elements with indexes i and j. ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) // HealthChecker defines the signature of the client-side LB channel health diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac56572ad5..b901c7bace50 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go new file mode 100644 index 000000000000..fd33af51ae89 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/labels.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats provides internal stats related functionality. +package stats + +import "context" + +// Labels are the labels for metrics. +type Labels struct { + // TelemetryLabels are the telemetry labels to record. 
+	TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there are none.
+func GetLabels(ctx context.Context) *Labels {
+	labels, _ := ctx.Value(labelsKey{}).(*Labels)
+	return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+	// could also append
+	return context.WithValue(ctx, labelsKey{}, labels)
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 000000000000..be110d41f9a4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+	"fmt"
+
+	estats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It panics on any record call where the number of label values provided
+// does not match the number of label keys.
+type MetricsRecorderList struct {
+	// metricsRecorders are the metrics recorders this list will forward to.
+	metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metric recorder list with all the stats
+// handlers provided which implement the MetricsRecorder interface.
+// If no stats handlers provided implement the MetricsRecorder interface,
+// the MetricsRecorder list returned is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+	var mrs []estats.MetricsRecorder
+	for _, sh := range shs {
+		if mr, ok := sh.(estats.MetricsRecorder); ok {
+			mrs = append(mrs, mr)
+		}
+	}
+	return &MetricsRecorderList{
+		metricsRecorders: mrs,
+	}
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+	if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+		panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+	}
+}
+
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Count(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordFloat64Count(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Histo(handle, incr, labels...)
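// ----------------------------------------------------------------------------
// [Editorial example — not part of the vendored diff] The verifyLabels
// contract above: every Record* call must supply exactly
// len(Labels)+len(OptionalLabels) values, in declaration order, or the list
// panics. Sketch (internal API; descriptor contents and values are
// illustrative assumptions):
package sketch

import (
	estats "google.golang.org/grpc/experimental/stats"
	istats "google.golang.org/grpc/internal/stats"
	"google.golang.org/grpc/stats"
)

func record(shs []stats.Handler, h *estats.Int64CountHandle) {
	mrl := istats.NewMetricsRecorderList(shs)
	// If h's descriptor declares Labels ["grpc.method"] and OptionalLabels
	// ["grpc.lb.locality"], exactly two label values must be passed, in that
	// order; any other count panics in verifyLabels above.
	mrl.RecordInt64Count(h, 1, "/foo.Bar/Baz", "us-east-1a")
}
// ----------------------------------------------------------------------------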
+ } +} + +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc8205952..757925381fe7 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd75bd..54c24c2ff386 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7fd70..7e7aaa546368 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. 
Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a8907b..d5c1085eeaec 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a20c..ef72fbb3a016 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. 
Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. 
c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. +func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. 
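
The trfChan change above swaps an untyped atomic.Value (which required a type assertion and a sentinel nil-channel store) for an atomic.Pointer[chan struct{}], so throttle() can check for a throttling channel without taking the mutex, and finish() can atomically Swap it out before closing it. A minimal self-contained sketch of that pattern; the names engage/release/wait are illustrative, not the transport's:

package main

import (
	"fmt"
	"sync/atomic"
)

// throttler mirrors the controlBuffer trick: the channel exists only while
// the queue is over its threshold, and readers check for it lock-free.
type throttler struct {
	trfChan atomic.Pointer[chan struct{}]
}

// engage installs a fresh channel; subsequent wait() calls will block.
func (t *throttler) engage() {
	ch := make(chan struct{})
	t.trfChan.Store(&ch)
}

// release atomically removes the channel and closes it, unblocking waiters.
func (t *throttler) release() {
	if ch := t.trfChan.Swap(nil); ch != nil {
		close(*ch)
	}
}

// wait blocks only if a throttle channel is currently installed.
func (t *throttler) wait() {
	if ch := t.trfChan.Load(); ch != nil {
		<-(*ch)
	}
}

func main() {
	var t throttler
	t.wait() // no channel installed: returns immediately
	t.engage()
	go t.release()
	t.wait() // blocks until release closes the channel
	fmt.Println("unthrottled")
}
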
type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. + // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. 
- // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29a4e..ce878693bd74 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
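
processData now rents a frame-sized buffer from the loopy writer's mem.BufferPool instead of copying onto a fixed stack array, because the payload lives behind a mem.Reader and http2.Framer still needs one contiguous slice per DATA frame. A hedged sketch of the Get/Put pairing, using only the pool calls the diff itself relies on; coalesce and its arguments are illustrative:

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

// coalesce copies a gRPC message header and payload into a single pooled
// buffer, mirroring the pattern loopyWriter.processData uses. The
// Get/defer Put pairing is what keeps the per-frame copy from generating
// garbage.
func coalesce(pool mem.BufferPool, hdr, payload []byte) {
	size := len(hdr) + len(payload)
	buf := pool.Get(size) // *[]byte with at least size bytes
	defer pool.Put(buf)

	copy((*buf)[:len(hdr)], hdr)
	copy((*buf)[len(hdr):size], payload)

	// The real code hands this slice to framer.fr.WriteData; here we just
	// show the coalesced bytes are contiguous.
	fmt.Printf("coalesced frame: % x\n", (*buf)[:size])
}

func main() {
	coalesce(mem.DefaultBufferPool(), []byte{0, 0, 0, 0, 5}, []byte("hello"))
}
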
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c706986d..c769deab53c7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -770,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. 
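
Close() above now bounds how long it waits for the loopy writer to flush the final GOAWAY frame (goAwayLoopyWriterTimeout, 5 seconds), so a blackholed TCP connection can no longer stall client shutdown. A minimal sketch of that bounded wait; waitForWriter and writerDone stand in for the transport's internals:

package main

import (
	"fmt"
	"time"
)

// waitForWriter waits for the loopy writer to drain after the GOAWAY frame
// is queued, but gives up after timeout so Close cannot block forever on a
// stuck connection.
func waitForWriter(writerDone <-chan struct{}, timeout time.Duration) bool {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-writerDone:
		return true // GOAWAY made it out
	case <-timer.C:
		return false // give up and hard-close the connection
	}
}

func main() {
	done := make(chan struct{})
	close(done)
	fmt.Println("writer drained:", waitForWriter(done, 5*time.Second))
}
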
if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1642,11 +1667,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1671,13 +1695,6 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} @@ -1745,7 +1762,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). 
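
Both keepalive loops also drop the package-local minTime helper in favor of the generic min builtin added in Go 1.21 (the int-only min helper in controlbuf.go is deleted for the same reason), since the builtin works for any ordered type:

package main

import (
	"fmt"
	"time"
)

func main() {
	fmt.Println(min(3, 5))                                // int
	fmt.Println(min(2*time.Second, 500*time.Millisecond)) // time.Duration
}
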
- sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index b7091165b501..584b50fe5530 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). 
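
The server-side Write below follows the same ownership rule the client's Write adopted above: once a mem.Reader is taken from the incoming BufferSlice, every early-return path must close it so the pooled buffers are released; on success, the queued dataFrame owns the reader and closes it after the bytes go out. A hedged sketch of that discipline, where send and enqueue are illustrative stand-ins for Write and controlBuf.put:

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

// send takes a reader from the slice and must close it on the failure path;
// on success, ownership moves to whatever was enqueued.
func send(data mem.BufferSlice, enqueue func(n int) error) error {
	reader := data.Reader()
	if err := enqueue(reader.Remaining()); err != nil {
		_ = reader.Close() // release pooled buffers on the error path
		return err
	}
	// Success: the queued frame now owns reader and closes it after writing.
	return nil
}

func main() {
	data := mem.BufferSlice{mem.Copy([]byte("hello"), mem.DefaultBufferPool())}
	err := send(data, func(n int) error {
		fmt.Println("queued", n, "bytes")
		return nil
	})
	fmt.Println("err:", err)
}
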
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd442e..3613d7b64817 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa1032574c..54b224436544 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. 
However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ade97c..fdd6fa86cc15 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,7 +22,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,32 +47,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -148,45 +129,97 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there no additional data available in +// recv. If Read returns any non-nil error, it will continue to return that +// error. 
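
The reworked recvBufferReader hands out pooled mem.Buffers instead of copying into a caller-supplied slice. Its two primitives, as used in the diff, are mem.SplitUnsafe, which carves the first n bytes off a buffer without copying, and mem.ReadUnsafe, which drains a buffer into a small header slice and returns whatever remains. A small demonstration, assuming exactly those semantics:

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	buf := mem.Copy([]byte("hello world"), pool)

	// Split off the first 5 bytes without copying; buf must not be used
	// again after the split (hence "Unsafe").
	head, tail := mem.SplitUnsafe(buf, 5)
	fmt.Printf("head=%q tail=%q\n", head.ReadOnlyData(), tail.ReadOnlyData())

	// Drain up to len(hdr) bytes of tail into a caller-owned header slice.
	hdr := make([]byte, 3)
	n, rest := mem.ReadUnsafe(hdr, tail)
	fmt.Printf("header=%q rest=%q (n=%d)\n", hdr[:n], rest.ReadOnlyData(), n)

	head.Free()
	rest.Free()
}
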
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) + } +} + +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. 
r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
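
Stream's read path is now split in two: ReadHeader fills a small caller-owned slice (suited to the 5-byte gRPC message prefix), while Read(n), shown next, returns the payload as a mem.BufferSlice of pooled buffers the caller must Free. A hedged sketch of consuming the split API to receive one length-prefixed message; msgReader and fakeStream are illustrative stand-ins for *transport.Stream:

package main

import (
	"encoding/binary"
	"fmt"

	"google.golang.org/grpc/mem"
)

// msgReader mirrors the two new Stream methods.
type msgReader interface {
	ReadHeader(header []byte) error
	Read(n int) (mem.BufferSlice, error)
}

// recvMessage reads the 5-byte prefix (compression flag + big-endian
// length), then exactly that many payload bytes.
func recvMessage(s msgReader) (mem.BufferSlice, error) {
	hdr := make([]byte, 5)
	if err := s.ReadHeader(hdr); err != nil {
		return nil, err
	}
	length := binary.BigEndian.Uint32(hdr[1:])
	return s.Read(int(length)) // caller must Free the returned slice
}

// fakeStream serves bytes from memory, standing in for the transport.
type fakeStream struct{ buf []byte }

func (f *fakeStream) ReadHeader(h []byte) error {
	copy(h, f.buf[:len(h)])
	f.buf = f.buf[len(h):]
	return nil
}

func (f *fakeStream) Read(n int) (mem.BufferSlice, error) {
	b := mem.Copy(f.buf[:n], mem.DefaultBufferPool())
	f.buf = f.buf[n:]
	return mem.BufferSlice{b}, nil
}

func main() {
	msg := append([]byte{0, 0, 0, 0, 5}, "hello"...)
	data, err := recvMessage(&fakeStream{buf: msg})
	if err != nil {
		panic(err)
	}
	defer data.Free()
	for _, b := range data {
		fmt.Printf("%s", b.ReadOnlyData())
	}
	fmt.Println()
}
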
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err + } + t.windowHandler(len(header)) + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err } - t.windowHandler(n) - return + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +673,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +712,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +775,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +827,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +900,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service.pb.go index 6c5afce0beac..3ac1ee37eb69 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/testing/benchmark_service.proto package grpc_testing @@ -76,7 +76,7 @@ var file_grpc_testing_benchmark_service_proto_rawDesc = []byte{ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var file_grpc_testing_benchmark_service_proto_goTypes = []interface{}{ +var file_grpc_testing_benchmark_service_proto_goTypes = []any{ (*SimpleRequest)(nil), // 0: grpc.testing.SimpleRequest (*SimpleResponse)(nil), // 1: grpc.testing.SimpleResponse } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service_grpc.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service_grpc.pb.go index 3a54d00c1311..cd5d64d7e61f 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service_grpc.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/benchmark_service_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/testing/benchmark_service.proto package grpc_testing @@ -143,7 +143,7 @@ type BenchmarkService_StreamingBothWaysClient = grpc.BidiStreamingClient[SimpleR // BenchmarkServiceServer is the server API for BenchmarkService service. // All implementations must embed UnimplementedBenchmarkServiceServer -// for forward compatibility +// for forward compatibility. type BenchmarkServiceServer interface { // One request followed by one response. // The server returns the client payload as-is. @@ -164,9 +164,12 @@ type BenchmarkServiceServer interface { mustEmbedUnimplementedBenchmarkServiceServer() } -// UnimplementedBenchmarkServiceServer must be embedded to have forward compatible implementations. -type UnimplementedBenchmarkServiceServer struct { -} +// UnimplementedBenchmarkServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
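
The regenerated stubs below also tighten the forward-compatibility contract: UnimplementedBenchmarkServiceServer must be embedded by value, and RegisterBenchmarkServiceServer probes for testEmbeddedByValue() so a nil-pointer embed panics at registration time rather than on the first unimplemented RPC. An illustrative implementation (server and its UnaryCall body are examples, not generated code):

package main

import (
	"context"

	"google.golang.org/grpc"
	pb "google.golang.org/grpc/interop/grpc_testing"
)

type server struct {
	// Embedded by value: safe even when the struct is zero. Embedding
	// *pb.UnimplementedBenchmarkServiceServer and leaving it nil would
	// panic at registration below.
	pb.UnimplementedBenchmarkServiceServer
}

// UnaryCall overrides one method; the embedded struct answers the rest
// with codes.Unimplemented.
func (s *server) UnaryCall(_ context.Context, req *pb.SimpleRequest) (*pb.SimpleResponse, error) {
	return &pb.SimpleResponse{Payload: req.GetPayload()}, nil
}

func main() {
	gs := grpc.NewServer()
	pb.RegisterBenchmarkServiceServer(gs, &server{}) // embed check runs here
	_ = gs
}
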
+type UnimplementedBenchmarkServiceServer struct{} func (UnimplementedBenchmarkServiceServer) UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UnaryCall not implemented") @@ -184,6 +187,7 @@ func (UnimplementedBenchmarkServiceServer) StreamingBothWays(grpc.BidiStreamingS return status.Errorf(codes.Unimplemented, "method StreamingBothWays not implemented") } func (UnimplementedBenchmarkServiceServer) mustEmbedUnimplementedBenchmarkServiceServer() {} +func (UnimplementedBenchmarkServiceServer) testEmbeddedByValue() {} // UnsafeBenchmarkServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to BenchmarkServiceServer will @@ -193,6 +197,13 @@ type UnsafeBenchmarkServiceServer interface { } func RegisterBenchmarkServiceServer(s grpc.ServiceRegistrar, srv BenchmarkServiceServer) { + // If the following call panics, it indicates UnimplementedBenchmarkServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&BenchmarkService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/control.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/control.pb.go index c2176a93a35f..e7ca21dcf217 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/control.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/control.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/testing/control.proto package grpc_testing @@ -2148,7 +2148,7 @@ func file_grpc_testing_control_proto_rawDescGZIP() []byte { var file_grpc_testing_control_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_testing_control_proto_msgTypes = make([]protoimpl.MessageInfo, 19) -var file_grpc_testing_control_proto_goTypes = []interface{}{ +var file_grpc_testing_control_proto_goTypes = []any{ (ClientType)(0), // 0: grpc.testing.ClientType (ServerType)(0), // 1: grpc.testing.ServerType (RpcType)(0), // 2: grpc.testing.RpcType @@ -2225,7 +2225,7 @@ func file_grpc_testing_control_proto_init() { file_grpc_testing_payloads_proto_init() file_grpc_testing_stats_proto_init() if !protoimpl.UnsafeEnabled { - file_grpc_testing_control_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*PoissonParams); i { case 0: return &v.state @@ -2237,7 +2237,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClosedLoopParams); i { case 0: return &v.state @@ -2249,7 +2249,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*LoadParams); i { case 0: return &v.state @@ -2261,7 +2261,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*SecurityParams); i { case 0: return &v.state @@ -2273,7 +2273,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ChannelArg); i { case 0: return &v.state @@ -2285,7 +2285,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*ClientConfig); i { case 0: return &v.state @@ -2297,7 +2297,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*ClientStatus); i { case 0: return &v.state @@ -2309,7 +2309,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Mark); i { case 0: return &v.state @@ -2321,7 +2321,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[8].Exporter = 
func(v any, i int) any { switch v := v.(*ClientArgs); i { case 0: return &v.state @@ -2333,7 +2333,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ServerConfig); i { case 0: return &v.state @@ -2345,7 +2345,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ServerArgs); i { case 0: return &v.state @@ -2357,7 +2357,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*ServerStatus); i { case 0: return &v.state @@ -2369,7 +2369,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*CoreRequest); i { case 0: return &v.state @@ -2381,7 +2381,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*CoreResponse); i { case 0: return &v.state @@ -2393,7 +2393,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*Void); i { case 0: return &v.state @@ -2405,7 +2405,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*Scenario); i { case 0: return &v.state @@ -2417,7 +2417,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*Scenarios); i { case 0: return &v.state @@ -2429,7 +2429,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*ScenarioResultSummary); i { case 0: return &v.state @@ -2441,7 +2441,7 @@ func file_grpc_testing_control_proto_init() { return nil } } - file_grpc_testing_control_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_control_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*ScenarioResult); i { case 0: return &v.state @@ -2454,19 +2454,19 @@ func file_grpc_testing_control_proto_init() { } } } - file_grpc_testing_control_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_testing_control_proto_msgTypes[2].OneofWrappers = []any{ 
(*LoadParams_ClosedLoop)(nil), (*LoadParams_Poisson)(nil), } - file_grpc_testing_control_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_grpc_testing_control_proto_msgTypes[4].OneofWrappers = []any{ (*ChannelArg_StrValue)(nil), (*ChannelArg_IntValue)(nil), } - file_grpc_testing_control_proto_msgTypes[8].OneofWrappers = []interface{}{ + file_grpc_testing_control_proto_msgTypes[8].OneofWrappers = []any{ (*ClientArgs_Setup)(nil), (*ClientArgs_Mark)(nil), } - file_grpc_testing_control_proto_msgTypes[10].OneofWrappers = []interface{}{ + file_grpc_testing_control_proto_msgTypes[10].OneofWrappers = []any{ (*ServerArgs_Setup)(nil), (*ServerArgs_Mark)(nil), } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/core/stats.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/core/stats.pb.go index 99369152c0be..e6ca38ef3597 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/core/stats.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/core/stats.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/core/stats.proto package core @@ -311,7 +311,7 @@ func file_grpc_core_stats_proto_rawDescGZIP() []byte { } var file_grpc_core_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_grpc_core_stats_proto_goTypes = []interface{}{ +var file_grpc_core_stats_proto_goTypes = []any{ (*Bucket)(nil), // 0: grpc.core.Bucket (*Histogram)(nil), // 1: grpc.core.Histogram (*Metric)(nil), // 2: grpc.core.Metric @@ -334,7 +334,7 @@ func file_grpc_core_stats_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_core_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_core_stats_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Bucket); i { case 0: return &v.state @@ -346,7 +346,7 @@ func file_grpc_core_stats_proto_init() { return nil } } - file_grpc_core_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_core_stats_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Histogram); i { case 0: return &v.state @@ -358,7 +358,7 @@ func file_grpc_core_stats_proto_init() { return nil } } - file_grpc_core_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_core_stats_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Metric); i { case 0: return &v.state @@ -370,7 +370,7 @@ func file_grpc_core_stats_proto_init() { return nil } } - file_grpc_core_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_core_stats_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Stats); i { case 0: return &v.state @@ -383,7 +383,7 @@ func file_grpc_core_stats_proto_init() { } } } - file_grpc_core_stats_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_core_stats_proto_msgTypes[2].OneofWrappers = []any{ (*Metric_Count)(nil), (*Metric_Histogram)(nil), } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/empty.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/empty.pb.go index c91a2c0f5b27..7709f270e498 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/empty.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/empty.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/testing/empty.proto package grpc_testing @@ -104,7 +104,7 @@ func file_grpc_testing_empty_proto_rawDescGZIP() []byte { } var file_grpc_testing_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_grpc_testing_empty_proto_goTypes = []interface{}{ +var file_grpc_testing_empty_proto_goTypes = []any{ (*Empty)(nil), // 0: grpc.testing.Empty } var file_grpc_testing_empty_proto_depIdxs = []int32{ @@ -121,7 +121,7 @@ func file_grpc_testing_empty_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_testing_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/messages.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/messages.pb.go index c9d37b297a1b..d3ccd24ff9de 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/messages.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/messages.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/testing/messages.proto package grpc_testing @@ -2518,7 +2518,7 @@ func file_grpc_testing_messages_proto_rawDescGZIP() []byte { var file_grpc_testing_messages_proto_enumTypes = make([]protoimpl.EnumInfo, 5) var file_grpc_testing_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 40) -var file_grpc_testing_messages_proto_goTypes = []interface{}{ +var file_grpc_testing_messages_proto_goTypes = []any{ (PayloadType)(0), // 0: grpc.testing.PayloadType (GrpclbRouteType)(0), // 1: grpc.testing.GrpclbRouteType (LoadBalancerStatsResponse_MetadataType)(0), // 2: grpc.testing.LoadBalancerStatsResponse.MetadataType @@ -2618,7 +2618,7 @@ func file_grpc_testing_messages_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_testing_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*BoolValue); i { case 0: return &v.state @@ -2630,7 +2630,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Payload); i { case 0: return &v.state @@ -2642,7 +2642,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*EchoStatus); i { case 0: return &v.state @@ -2654,7 +2654,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*SimpleRequest); i { case 0: return &v.state @@ -2666,7 +2666,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[4].Exporter = func(v 
any, i int) any { switch v := v.(*SimpleResponse); i { case 0: return &v.state @@ -2678,7 +2678,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*StreamingInputCallRequest); i { case 0: return &v.state @@ -2690,7 +2690,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*StreamingInputCallResponse); i { case 0: return &v.state @@ -2702,7 +2702,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ResponseParameters); i { case 0: return &v.state @@ -2714,7 +2714,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*StreamingOutputCallRequest); i { case 0: return &v.state @@ -2726,7 +2726,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*StreamingOutputCallResponse); i { case 0: return &v.state @@ -2738,7 +2738,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ReconnectParams); i { case 0: return &v.state @@ -2750,7 +2750,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*ReconnectInfo); i { case 0: return &v.state @@ -2762,7 +2762,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*LoadBalancerStatsRequest); i { case 0: return &v.state @@ -2774,7 +2774,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*LoadBalancerStatsResponse); i { case 0: return &v.state @@ -2786,7 +2786,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*LoadBalancerAccumulatedStatsRequest); i { case 0: return &v.state @@ -2798,7 +2798,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - 
file_grpc_testing_messages_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*LoadBalancerAccumulatedStatsResponse); i { case 0: return &v.state @@ -2810,7 +2810,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ClientConfigureRequest); i { case 0: return &v.state @@ -2822,7 +2822,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*ClientConfigureResponse); i { case 0: return &v.state @@ -2834,7 +2834,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*MemorySize); i { case 0: return &v.state @@ -2846,7 +2846,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*TestOrcaReport); i { case 0: return &v.state @@ -2858,7 +2858,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*SetReturnStatusRequest); i { case 0: return &v.state @@ -2870,7 +2870,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*HookRequest); i { case 0: return &v.state @@ -2882,7 +2882,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*HookResponse); i { case 0: return &v.state @@ -2894,7 +2894,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*LoadBalancerStatsResponse_MetadataEntry); i { case 0: return &v.state @@ -2906,7 +2906,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*LoadBalancerStatsResponse_RpcMetadata); i { case 0: return &v.state @@ -2918,7 +2918,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[25].Exporter = func(v any, i int) 
any { switch v := v.(*LoadBalancerStatsResponse_MetadataByPeer); i { case 0: return &v.state @@ -2930,7 +2930,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*LoadBalancerStatsResponse_RpcsByPeer); i { case 0: return &v.state @@ -2942,7 +2942,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[34].Exporter = func(v any, i int) any { switch v := v.(*LoadBalancerAccumulatedStatsResponse_MethodStats); i { case 0: return &v.state @@ -2954,7 +2954,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[37].Exporter = func(v any, i int) any { switch v := v.(*ClientConfigureRequest_Metadata); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/payloads.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/payloads.pb.go index 712c92241366..849bff770ca3 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/payloads.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/payloads.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/testing/payloads.proto package grpc_testing @@ -327,7 +327,7 @@ func file_grpc_testing_payloads_proto_rawDescGZIP() []byte { } var file_grpc_testing_payloads_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_grpc_testing_payloads_proto_goTypes = []interface{}{ +var file_grpc_testing_payloads_proto_goTypes = []any{ (*ByteBufferParams)(nil), // 0: grpc.testing.ByteBufferParams (*SimpleProtoParams)(nil), // 1: grpc.testing.SimpleProtoParams (*ComplexProtoParams)(nil), // 2: grpc.testing.ComplexProtoParams @@ -350,7 +350,7 @@ func file_grpc_testing_payloads_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_testing_payloads_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_payloads_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ByteBufferParams); i { case 0: return &v.state @@ -362,7 +362,7 @@ func file_grpc_testing_payloads_proto_init() { return nil } } - file_grpc_testing_payloads_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_payloads_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*SimpleProtoParams); i { case 0: return &v.state @@ -374,7 +374,7 @@ func file_grpc_testing_payloads_proto_init() { return nil } } - file_grpc_testing_payloads_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_payloads_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ComplexProtoParams); i { case 0: return &v.state @@ -386,7 +386,7 @@ func file_grpc_testing_payloads_proto_init() { return nil } } - file_grpc_testing_payloads_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_payloads_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*PayloadConfig); i { case 0: return &v.state @@ -399,7 +399,7 @@ func 
file_grpc_testing_payloads_proto_init() { } } } - file_grpc_testing_payloads_proto_msgTypes[3].OneofWrappers = []interface{}{ + file_grpc_testing_payloads_proto_msgTypes[3].OneofWrappers = []any{ (*PayloadConfig_BytebufParams)(nil), (*PayloadConfig_SimpleParams)(nil), (*PayloadConfig_ComplexParams)(nil), diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service.pb.go index 4bc43c642c86..dd2ffbee88a4 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/testing/report_qps_scenario_service.proto package grpc_testing @@ -57,7 +57,7 @@ var file_grpc_testing_report_qps_scenario_service_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var file_grpc_testing_report_qps_scenario_service_proto_goTypes = []interface{}{ +var file_grpc_testing_report_qps_scenario_service_proto_goTypes = []any{ (*ScenarioResult)(nil), // 0: grpc.testing.ScenarioResult (*Void)(nil), // 1: grpc.testing.Void } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go index cf4aff88756a..9198e4a6e194 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/testing/report_qps_scenario_service.proto package grpc_testing @@ -67,22 +67,26 @@ func (c *reportQpsScenarioServiceClient) ReportScenario(ctx context.Context, in // ReportQpsScenarioServiceServer is the server API for ReportQpsScenarioService service. // All implementations must embed UnimplementedReportQpsScenarioServiceServer -// for forward compatibility +// for forward compatibility. type ReportQpsScenarioServiceServer interface { // Report results of a QPS test benchmark scenario. ReportScenario(context.Context, *ScenarioResult) (*Void, error) mustEmbedUnimplementedReportQpsScenarioServiceServer() } -// UnimplementedReportQpsScenarioServiceServer must be embedded to have forward compatible implementations. -type UnimplementedReportQpsScenarioServiceServer struct { -} +// UnimplementedReportQpsScenarioServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
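// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored diff): the embed-by-value rule in
// the NOTE above, shown end to end. It assumes the vendored
// google.golang.org/grpc/interop/grpc_testing package generated in this diff;
// the qpsReporter type and the listen address are hypothetical.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/interop/grpc_testing"
)

// qpsReporter embeds the Unimplemented server by value, so its zero value is
// usable and any method left unimplemented returns codes.Unimplemented rather
// than dereferencing a nil pointer. Embedding
// *testpb.UnimplementedReportQpsScenarioServiceServer instead would leave the
// field nil, and the testEmbeddedByValue probe added in this diff would panic
// at registration time instead of on the first unimplemented call.
type qpsReporter struct {
	testpb.UnimplementedReportQpsScenarioServiceServer
}

func (qpsReporter) ReportScenario(context.Context, *testpb.ScenarioResult) (*testpb.Void, error) {
	return &testpb.Void{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	testpb.RegisterReportQpsScenarioServiceServer(s, qpsReporter{})
	log.Fatal(s.Serve(lis))
}
// ---------------------------------------------------------------------------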
+type UnimplementedReportQpsScenarioServiceServer struct{} func (UnimplementedReportQpsScenarioServiceServer) ReportScenario(context.Context, *ScenarioResult) (*Void, error) { return nil, status.Errorf(codes.Unimplemented, "method ReportScenario not implemented") } func (UnimplementedReportQpsScenarioServiceServer) mustEmbedUnimplementedReportQpsScenarioServiceServer() { } +func (UnimplementedReportQpsScenarioServiceServer) testEmbeddedByValue() {} // UnsafeReportQpsScenarioServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ReportQpsScenarioServiceServer will @@ -92,6 +96,13 @@ type UnsafeReportQpsScenarioServiceServer interface { } func RegisterReportQpsScenarioServiceServer(s grpc.ServiceRegistrar, srv ReportQpsScenarioServiceServer) { + // If the following call panics, it indicates UnimplementedReportQpsScenarioServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&ReportQpsScenarioService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/stats.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/stats.pb.go index 351a964a6f27..25110f6209df 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/stats.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/stats.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/testing/stats.proto package grpc_testing @@ -521,7 +521,7 @@ func file_grpc_testing_stats_proto_rawDescGZIP() []byte { } var file_grpc_testing_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_grpc_testing_stats_proto_goTypes = []interface{}{ +var file_grpc_testing_stats_proto_goTypes = []any{ (*ServerStats)(nil), // 0: grpc.testing.ServerStats (*HistogramParams)(nil), // 1: grpc.testing.HistogramParams (*HistogramData)(nil), // 2: grpc.testing.HistogramData @@ -547,7 +547,7 @@ func file_grpc_testing_stats_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_testing_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_stats_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ServerStats); i { case 0: return &v.state @@ -559,7 +559,7 @@ func file_grpc_testing_stats_proto_init() { return nil } } - file_grpc_testing_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_stats_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*HistogramParams); i { case 0: return &v.state @@ -571,7 +571,7 @@ func file_grpc_testing_stats_proto_init() { return nil } } - file_grpc_testing_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_stats_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*HistogramData); i { case 0: return &v.state @@ -583,7 +583,7 @@ func file_grpc_testing_stats_proto_init() { return nil } } - file_grpc_testing_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_stats_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*RequestResultCount); i 
{ case 0: return &v.state @@ -595,7 +595,7 @@ func file_grpc_testing_stats_proto_init() { return nil } } - file_grpc_testing_stats_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_stats_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ClientStats); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go index f0ffbd91ebc3..ee1845d15181 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/testing/test.proto package grpc_testing @@ -160,7 +160,7 @@ var file_grpc_testing_test_proto_rawDesc = []byte{ 0x74, 0x6f, 0x33, } -var file_grpc_testing_test_proto_goTypes = []interface{}{ +var file_grpc_testing_test_proto_goTypes = []any{ (*Empty)(nil), // 0: grpc.testing.Empty (*SimpleRequest)(nil), // 1: grpc.testing.SimpleRequest (*StreamingOutputCallRequest)(nil), // 2: grpc.testing.StreamingOutputCallRequest diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test_grpc.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test_grpc.pb.go index fd79b2f02c13..2e35398ed33e 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/test_grpc.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/testing/test.proto package grpc_testing @@ -189,7 +189,7 @@ func (c *testServiceClient) UnimplementedCall(ctx context.Context, in *Empty, op // TestServiceServer is the server API for TestService service. // All implementations must embed UnimplementedTestServiceServer -// for forward compatibility +// for forward compatibility. // // A simple service to test the various types of RPCs and experiment with // performance with various types of payload. @@ -223,9 +223,12 @@ type TestServiceServer interface { mustEmbedUnimplementedTestServiceServer() } -// UnimplementedTestServiceServer must be embedded to have forward compatible implementations. -type UnimplementedTestServiceServer struct { -} +// UnimplementedTestServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedTestServiceServer struct{} func (UnimplementedTestServiceServer) EmptyCall(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method EmptyCall not implemented") @@ -252,6 +255,7 @@ func (UnimplementedTestServiceServer) UnimplementedCall(context.Context, *Empty) return nil, status.Errorf(codes.Unimplemented, "method UnimplementedCall not implemented") } func (UnimplementedTestServiceServer) mustEmbedUnimplementedTestServiceServer() {} +func (UnimplementedTestServiceServer) testEmbeddedByValue() {} // UnsafeTestServiceServer may be embedded to opt out of forward compatibility for this service. 
// Use of this interface is not recommended, as added methods to TestServiceServer will @@ -261,6 +265,13 @@ type UnsafeTestServiceServer interface { } func RegisterTestServiceServer(s grpc.ServiceRegistrar, srv TestServiceServer) { + // If the following call panics, it indicates UnimplementedTestServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&TestService_ServiceDesc, srv) } @@ -454,7 +465,7 @@ func (c *unimplementedServiceClient) UnimplementedCall(ctx context.Context, in * // UnimplementedServiceServer is the server API for UnimplementedService service. // All implementations must embed UnimplementedUnimplementedServiceServer -// for forward compatibility +// for forward compatibility. // // A simple service NOT implemented at servers so clients can test for // that case. @@ -464,14 +475,18 @@ type UnimplementedServiceServer interface { mustEmbedUnimplementedUnimplementedServiceServer() } -// UnimplementedUnimplementedServiceServer must be embedded to have forward compatible implementations. -type UnimplementedUnimplementedServiceServer struct { -} +// UnimplementedUnimplementedServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedUnimplementedServiceServer struct{} func (UnimplementedUnimplementedServiceServer) UnimplementedCall(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method UnimplementedCall not implemented") } func (UnimplementedUnimplementedServiceServer) mustEmbedUnimplementedUnimplementedServiceServer() {} +func (UnimplementedUnimplementedServiceServer) testEmbeddedByValue() {} // UnsafeUnimplementedServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to UnimplementedServiceServer will @@ -481,6 +496,13 @@ type UnsafeUnimplementedServiceServer interface { } func RegisterUnimplementedServiceServer(s grpc.ServiceRegistrar, srv UnimplementedServiceServer) { + // If the following call panics, it indicates UnimplementedUnimplementedServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&UnimplementedService_ServiceDesc, srv) } @@ -563,7 +585,7 @@ func (c *reconnectServiceClient) Stop(ctx context.Context, in *Empty, opts ...gr // ReconnectServiceServer is the server API for ReconnectService service. // All implementations must embed UnimplementedReconnectServiceServer -// for forward compatibility +// for forward compatibility. // // A service used to control reconnect server. type ReconnectServiceServer interface { @@ -572,9 +594,12 @@ type ReconnectServiceServer interface { mustEmbedUnimplementedReconnectServiceServer() } -// UnimplementedReconnectServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedReconnectServiceServer struct { -} +// UnimplementedReconnectServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedReconnectServiceServer struct{} func (UnimplementedReconnectServiceServer) Start(context.Context, *ReconnectParams) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") @@ -583,6 +608,7 @@ func (UnimplementedReconnectServiceServer) Stop(context.Context, *Empty) (*Recon return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") } func (UnimplementedReconnectServiceServer) mustEmbedUnimplementedReconnectServiceServer() {} +func (UnimplementedReconnectServiceServer) testEmbeddedByValue() {} // UnsafeReconnectServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ReconnectServiceServer will @@ -592,6 +618,13 @@ type UnsafeReconnectServiceServer interface { } func RegisterReconnectServiceServer(s grpc.ServiceRegistrar, srv ReconnectServiceServer) { + // If the following call panics, it indicates UnimplementedReconnectServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&ReconnectService_ServiceDesc, srv) } @@ -698,7 +731,7 @@ func (c *loadBalancerStatsServiceClient) GetClientAccumulatedStats(ctx context.C // LoadBalancerStatsServiceServer is the server API for LoadBalancerStatsService service. // All implementations must embed UnimplementedLoadBalancerStatsServiceServer -// for forward compatibility +// for forward compatibility. // // A service used to obtain stats for verifying LB behavior. type LoadBalancerStatsServiceServer interface { @@ -709,9 +742,12 @@ type LoadBalancerStatsServiceServer interface { mustEmbedUnimplementedLoadBalancerStatsServiceServer() } -// UnimplementedLoadBalancerStatsServiceServer must be embedded to have forward compatible implementations. -type UnimplementedLoadBalancerStatsServiceServer struct { -} +// UnimplementedLoadBalancerStatsServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedLoadBalancerStatsServiceServer struct{} func (UnimplementedLoadBalancerStatsServiceServer) GetClientStats(context.Context, *LoadBalancerStatsRequest) (*LoadBalancerStatsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetClientStats not implemented") @@ -721,6 +757,7 @@ func (UnimplementedLoadBalancerStatsServiceServer) GetClientAccumulatedStats(con } func (UnimplementedLoadBalancerStatsServiceServer) mustEmbedUnimplementedLoadBalancerStatsServiceServer() { } +func (UnimplementedLoadBalancerStatsServiceServer) testEmbeddedByValue() {} // UnsafeLoadBalancerStatsServiceServer may be embedded to opt out of forward compatibility for this service. 
// Use of this interface is not recommended, as added methods to LoadBalancerStatsServiceServer will @@ -730,6 +767,13 @@ type UnsafeLoadBalancerStatsServiceServer interface { } func RegisterLoadBalancerStatsServiceServer(s grpc.ServiceRegistrar, srv LoadBalancerStatsServiceServer) { + // If the following call panics, it indicates UnimplementedLoadBalancerStatsServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&LoadBalancerStatsService_ServiceDesc, srv) } @@ -850,7 +894,7 @@ func (c *hookServiceClient) ClearReturnStatus(ctx context.Context, in *Empty, op // HookServiceServer is the server API for HookService service. // All implementations must embed UnimplementedHookServiceServer -// for forward compatibility +// for forward compatibility. // // Hook service. Used to keep Kubernetes from shutting the pod down. type HookServiceServer interface { @@ -864,9 +908,12 @@ type HookServiceServer interface { mustEmbedUnimplementedHookServiceServer() } -// UnimplementedHookServiceServer must be embedded to have forward compatible implementations. -type UnimplementedHookServiceServer struct { -} +// UnimplementedHookServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHookServiceServer struct{} func (UnimplementedHookServiceServer) Hook(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Hook not implemented") @@ -878,6 +925,7 @@ func (UnimplementedHookServiceServer) ClearReturnStatus(context.Context, *Empty) return nil, status.Errorf(codes.Unimplemented, "method ClearReturnStatus not implemented") } func (UnimplementedHookServiceServer) mustEmbedUnimplementedHookServiceServer() {} +func (UnimplementedHookServiceServer) testEmbeddedByValue() {} // UnsafeHookServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HookServiceServer will @@ -887,6 +935,13 @@ type UnsafeHookServiceServer interface { } func RegisterHookServiceServer(s grpc.ServiceRegistrar, srv HookServiceServer) { + // If the following call panics, it indicates UnimplementedHookServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&HookService_ServiceDesc, srv) } @@ -1025,7 +1080,7 @@ func (c *xdsUpdateHealthServiceClient) SendHookRequest(ctx context.Context, in * // XdsUpdateHealthServiceServer is the server API for XdsUpdateHealthService service. // All implementations must embed UnimplementedXdsUpdateHealthServiceServer -// for forward compatibility +// for forward compatibility. // // A service to remotely control health status of an xDS test server. 
type XdsUpdateHealthServiceServer interface { @@ -1035,9 +1090,12 @@ type XdsUpdateHealthServiceServer interface { mustEmbedUnimplementedXdsUpdateHealthServiceServer() } -// UnimplementedXdsUpdateHealthServiceServer must be embedded to have forward compatible implementations. -type UnimplementedXdsUpdateHealthServiceServer struct { -} +// UnimplementedXdsUpdateHealthServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedXdsUpdateHealthServiceServer struct{} func (UnimplementedXdsUpdateHealthServiceServer) SetServing(context.Context, *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SetServing not implemented") @@ -1050,6 +1108,7 @@ func (UnimplementedXdsUpdateHealthServiceServer) SendHookRequest(context.Context } func (UnimplementedXdsUpdateHealthServiceServer) mustEmbedUnimplementedXdsUpdateHealthServiceServer() { } +func (UnimplementedXdsUpdateHealthServiceServer) testEmbeddedByValue() {} // UnsafeXdsUpdateHealthServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to XdsUpdateHealthServiceServer will @@ -1059,6 +1118,13 @@ type UnsafeXdsUpdateHealthServiceServer interface { } func RegisterXdsUpdateHealthServiceServer(s grpc.ServiceRegistrar, srv XdsUpdateHealthServiceServer) { + // If the following call panics, it indicates UnimplementedXdsUpdateHealthServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&XdsUpdateHealthService_ServiceDesc, srv) } @@ -1174,7 +1240,7 @@ func (c *xdsUpdateClientConfigureServiceClient) Configure(ctx context.Context, i // XdsUpdateClientConfigureServiceServer is the server API for XdsUpdateClientConfigureService service. // All implementations must embed UnimplementedXdsUpdateClientConfigureServiceServer -// for forward compatibility +// for forward compatibility. // // A service to dynamically update the configuration of an xDS test client. type XdsUpdateClientConfigureServiceServer interface { @@ -1183,15 +1249,19 @@ type XdsUpdateClientConfigureServiceServer interface { mustEmbedUnimplementedXdsUpdateClientConfigureServiceServer() } -// UnimplementedXdsUpdateClientConfigureServiceServer must be embedded to have forward compatible implementations. -type UnimplementedXdsUpdateClientConfigureServiceServer struct { -} +// UnimplementedXdsUpdateClientConfigureServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedXdsUpdateClientConfigureServiceServer struct{} func (UnimplementedXdsUpdateClientConfigureServiceServer) Configure(context.Context, *ClientConfigureRequest) (*ClientConfigureResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") } func (UnimplementedXdsUpdateClientConfigureServiceServer) mustEmbedUnimplementedXdsUpdateClientConfigureServiceServer() { } +func (UnimplementedXdsUpdateClientConfigureServiceServer) testEmbeddedByValue() {} // UnsafeXdsUpdateClientConfigureServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to XdsUpdateClientConfigureServiceServer will @@ -1201,6 +1271,13 @@ type UnsafeXdsUpdateClientConfigureServiceServer interface { } func RegisterXdsUpdateClientConfigureServiceServer(s grpc.ServiceRegistrar, srv XdsUpdateClientConfigureServiceServer) { + // If the following call panics, it indicates UnimplementedXdsUpdateClientConfigureServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&XdsUpdateClientConfigureService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service.pb.go index 849b01667778..8e7b5509ae67 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/testing/worker_service.proto package grpc_testing @@ -67,7 +67,7 @@ var file_grpc_testing_worker_service_proto_rawDesc = []byte{ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var file_grpc_testing_worker_service_proto_goTypes = []interface{}{ +var file_grpc_testing_worker_service_proto_goTypes = []any{ (*ServerArgs)(nil), // 0: grpc.testing.ServerArgs (*ClientArgs)(nil), // 1: grpc.testing.ClientArgs (*CoreRequest)(nil), // 2: grpc.testing.CoreRequest diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service_grpc.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service_grpc.pb.go index 32ff7d9e2431..40cb6949ffe2 100644 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service_grpc.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/worker_service_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/testing/worker_service.proto package grpc_testing @@ -122,7 +122,7 @@ func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ... // WorkerServiceServer is the server API for WorkerService service. // All implementations must embed UnimplementedWorkerServiceServer -// for forward compatibility +// for forward compatibility. type WorkerServiceServer interface { // Start server with specified workload. 
// First request sent specifies the ServerConfig followed by ServerStatus @@ -145,9 +145,12 @@ type WorkerServiceServer interface { mustEmbedUnimplementedWorkerServiceServer() } -// UnimplementedWorkerServiceServer must be embedded to have forward compatible implementations. -type UnimplementedWorkerServiceServer struct { -} +// UnimplementedWorkerServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedWorkerServiceServer struct{} func (UnimplementedWorkerServiceServer) RunServer(grpc.BidiStreamingServer[ServerArgs, ServerStatus]) error { return status.Errorf(codes.Unimplemented, "method RunServer not implemented") @@ -162,6 +165,7 @@ func (UnimplementedWorkerServiceServer) QuitWorker(context.Context, *Void) (*Voi return nil, status.Errorf(codes.Unimplemented, "method QuitWorker not implemented") } func (UnimplementedWorkerServiceServer) mustEmbedUnimplementedWorkerServiceServer() {} +func (UnimplementedWorkerServiceServer) testEmbeddedByValue() {} // UnsafeWorkerServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to WorkerServiceServer will @@ -171,6 +175,13 @@ type UnsafeWorkerServiceServer interface { } func RegisterWorkerServiceServer(s grpc.ServiceRegistrar, srv WorkerServiceServer) { + // If the following call panics, it indicates UnimplementedWorkerServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&WorkerService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d31..eb42b19fb99a 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. 
If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 000000000000..c37c58c0233e --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. 
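// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored diff): exercising the BufferPool
// API introduced above. DefaultBufferPool, NewTieredBufferPool, Get and Put
// are all defined in this new file; the requested sizes are illustrative only.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Get returns a *[]byte whose length is exactly the requested size; the
	// tiered pool routes the request to the smallest tier that fits it (a
	// 3000-byte request lands in the 4KB tier), and requests above the
	// largest tier fall through to the simple fallback pool.
	buf := pool.Get(3000)
	fmt.Println(len(*buf)) // 3000; cap(*buf) is the tier size, 4096

	// Put recycles the buffer into the tier selected by its capacity;
	// buffers smaller than a tier's default size are silently dropped so a
	// later Get can never hand out a too-short buffer.
	pool.Put(buf)
}
// ---------------------------------------------------------------------------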
For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 000000000000..228e9c2f20f2 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. 
This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. +func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. +func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. +func (s BufferSlice) Reader() Reader { + s.Ref() + return &sliceReader{ + data: s, + len: s.Len(), + } +} + +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface +// with other parts systems. It also provides an additional convenience method +// Remaining(), which returns the number of unread bytes remaining in the slice. +// Buffers will be freed as they are read. +type Reader interface { + io.Reader + io.ByteReader + // Close frees the underlying BufferSlice and never returns an error. Subsequent + // calls to Read will return (0, io.EOF). + Close() error + // Remaining returns the number of unread bytes remaining in the slice. 
+ Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. + r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. + for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 000000000000..4d66b2ccc2be --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. 
+package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. + Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. 
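// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored diff): the reference-counting
// contract documented above, using only mem APIs from this file and
// buffer_pool.go. The 4KB payload is illustrative; anything at or below the
// 1KB pooling threshold would instead come back as a plain SliceBuffer whose
// Free is a no-op.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Copy pulls a []byte from the pool and copies the payload in; the
	// returned Buffer starts with a reference count of 1.
	payload := make([]byte, 4096)
	buf := mem.Copy(payload, pool)

	buf.Ref()              // second reference, e.g. for another goroutine
	fmt.Println(buf.Len()) // 4096

	buf.Free() // count drops to 1: data is still readable
	buf.Free() // count drops to 0: backing []byte returns to the pool

	// Any further ReadOnlyData, Ref or Free on buf would now panic.
}
// ---------------------------------------------------------------------------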
+func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. +func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read([]byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 1e9485fd6e26..d2e15253bbfb 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Keys are matched in a case insensitive // manner. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
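// With the experimental notice dropped in this hunk, ValueFromIncomingContext
// is a stable, allocation-light way to read a single metadata key on the
// server side. A small sketch (assumes imports "context" and
// "google.golang.org/grpc/metadata"):

func authToken(ctx context.Context) (string, bool) {
	// Matching is case-insensitive, per the comment fixed further down.
	if vals := metadata.ValueFromIncomingContext(ctx, "authorization"); len(vals) > 0 {
		return vals[0], true
	}
	return "", false
}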
func ValueFromIncomingContext(ctx context.Context, key string) []string { md, ok := ctx.Value(mdIncomingKey{}).(MD) if !ok { @@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee + // Case insensitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. if strings.EqualFold(k, key) { diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 73bd63364335..e87a17f36a50 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,6 +20,7 @@ package grpc import ( "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -31,9 +32,10 @@ import ( // later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData []byte + encodedData mem.BufferSlice hdr []byte - payload []byte + payload mem.BufferSlice + pf payloadFormat } // Encode marshalls and compresses the message using the codec and compressor for the stream. @@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c224..000000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . 
-name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) - -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." 
\ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2cb5a..09e864a89d35 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c5fb45236faf..23bb3fb25824 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. 
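// The manual-resolver hunk above moves BuildCallback under r.mu, so
// UpdateState or ReportError racing with an in-flight Build no longer
// corrupts the resolver. A typical wiring sketch using existing public API
// (the scheme and address are made up):

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example")
	r.InitialState(resolver.State{
		Addresses: []resolver.Address{{Addr: "localhost:50051"}},
	})
	conn, err := grpc.NewClient(
		r.Scheme()+":///test",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// With the fix above, pushing a new state from another goroutine while
	// the channel is still inside Build is safe.
}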
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e9151..2d96f1405e8d 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. 
The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. 
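// ForceCodecV2, introduced just below, accepts an encoding.CodecV2, which
// marshals into mem.BufferSlice rather than []byte. A sketch of one, assuming
// the CodecV2 interface is Marshal(any) (mem.BufferSlice, error),
// Unmarshal(mem.BufferSlice, any) error, and Name() string (the interface
// itself is not shown in this diff); the codec name and package are made up:

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/mem"
)

type Codec struct{}

func (Codec) Marshal(v any) (mem.BufferSlice, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	// SliceBuffer wraps a plain []byte with no-op Ref/Free.
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (Codec) Unmarshal(data mem.BufferSlice, v any) error {
	// The caller keeps ownership of data, so copy it out before returning.
	return json.Unmarshal(data.Materialize(), v)
}

func (Codec) Name() string { return "json" }

// Per-call usage: client.Method(ctx, req, grpc.ForceCodecV2(jsoncodec.Codec{}))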
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -608,14 +653,15 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. -func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
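// msgHeader, rewritten below, still emits the standard gRPC frame: one
// payload-format byte (0 plain, 1 compressed) followed by a 4-byte big-endian
// payload length. The same framing in isolation, standard library only:

package wire

import "encoding/binary"

func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5, 5+len(payload))
	if compressed {
		hdr[0] = 1 // compressionMade
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}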
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
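// The decompress rewrite above bounds memory with the LimitReader(max+1)
// idiom: reading at most one byte past the cap makes an over-limit message
// detectable without unbounded buffering. The same pattern in isolation:

package recvlimit

import (
	"fmt"
	"io"
)

func readCapped(r io.Reader, maxSize int) ([]byte, error) {
	data, err := io.ReadAll(io.LimitReader(r, int64(maxSize)+1))
	if err != nil {
		return nil, err
	}
	if len(data) > maxSize {
		return nil, fmt.Errorf("message larger than max (%d vs. %d)", len(data), maxSize)
	}
	return data, nil
}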
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 89f8e4792bf1..d1e1415a40f9 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,7 +81,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") @@ -170,7 +171,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
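// RecvBufferPool, deleted below, needs no replacement call site: the hunk
// above defaults serverOptions.bufferPool to mem.DefaultBufferPool(), so
// receive-side pooling is simply on. Servers that want the BufferSlice-native
// codec path opt in with the new option; jsoncodec.Codec is the hypothetical
// CodecV2 sketched earlier:

s := grpc.NewServer(grpc.ForceServerCodecV2(jsoncodec.Codec{}))
_ = s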
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { +func bufferPool(bufferPool mem.BufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool + o.bufferPool = bufferPool }) } @@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorkers blocks on a *transport.Stream channel forever and waits for +// serverWorker blocks on a *transport.Stream channel forever and waits for // data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). @@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { + data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } return codec } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe8e25..000000000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. -type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. 
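// The nopBufferPool removed here has a direct analogue under the new
// mem.BufferPool interface, whose Get(length int) *[]byte / Put(*[]byte)
// shape is evident from its call sites in this diff. A sketch:

type nopPool struct{}

func (nopPool) Get(length int) *[]byte {
	b := make([]byte, length)
	return &b
}

func (nopPool) Put(*[]byte) {}

var _ mem.BufferPool = nopPool{} // assumes the interface shape described above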
-type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index fdb0bd65182c..71195c4943d7 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -77,9 +77,6 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -150,9 +147,6 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). Length int diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 8051ef5b514a..bb2b2a216ce2 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } @@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -566,10 +567,15 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? + onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a @@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. 
return err } - // Note that the first op in the replay buffer always sets cs.attempt + // Note that the first op in replayBuffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } - if len(cs.buffer) == 0 { + if len(cs.replayBuffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. 
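// Reference accounting in the surrounding SendMsg, spelled out: SendMsg holds
// two references to payload. The deferred block above releases the first
// (through data when no compression was applied, since then payload == data).
// The extra Ref taken just above is handed to the replay buffer as the op's
// cleanup func, to be released by commitAttemptLocked or when the retry
// buffer overflows; if the op is never buffered because the attempt is
// already committed, the !onSuccessCalled branch below releases it instead.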
+ onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, + Client: true, + RecvTime: time.Now(), + Payload: m, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Length: payInfo.uncompressedBytes.Len(), }) } if channelz.IsOn() { @@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. 
// This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) } func (a *csAttempt) finish(err error) { @@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream.
as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1444,14 +1490,12 @@ // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) } func (as *addrConnStream) finish(err error) { @@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data, + Message: data.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } } return nil @@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, + Message: payInfo.uncompressedBytes.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg.
// Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - compData, err := compress(data, cp, comp) + compData, pf, err := compress(data, cp, comp, pool) if err != nil { - return nil, nil, nil, err + data.Free() + return nil, nil, nil, 0, err } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529c0cc..0037fee0bd71 100644 --- a/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. 
ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. 
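The Recv contract documented for these client-side streams, where io.EOF signals termination with an OK status and any other error carries the RPC status, lends itself to a small generic helper. A sketch against a stand-in stream type rather than generated gRPC code:

package main

import (
	"fmt"
	"io"
)

// fakeStream satisfies the documented Recv contract: messages until the
// stream terminates, then io.EOF for an OK status.
type fakeStream struct{ msgs []string }

func (s *fakeStream) Recv() (*string, error) {
	if len(s.msgs) == 0 {
		return nil, io.EOF
	}
	m := s.msgs[0]
	s.msgs = s.msgs[1:]
	return &m, nil
}

// drain reads a response stream to completion, treating io.EOF as OK.
func drain[Res any](stream interface{ Recv() (*Res, error) }) ([]*Res, error) {
	var out []*Res
	for {
		msg, err := stream.Recv()
		if err == io.EOF {
			return out, nil // terminated with an OK status
		}
		if err != nil {
			return out, err // status error from the RPC
		}
		out = append(out, msg)
	}
}

func main() {
	got, err := drain[string](&fakeStream{msgs: []string{"a", "b"}})
	fmt.Println(len(got), err) // 2 <nil>
}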
The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bafaef99be98..187fbf119514 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.65.0" +const Version = "1.67.0" diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go index c3182b63989d..9b7149e48cac 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go @@ -37,6 +37,8 @@ import ( "github.com/coreos/go-systemd/v22/daemon" "github.com/spf13/cobra" "github.com/spf13/pflag" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric/noop" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" jsonpatch "gopkg.in/evanphx/json-patch.v4" @@ -121,6 +123,9 @@ import ( func init() { utilruntime.Must(logsapi.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) + // Prevent memory leak from OTel metrics, which we don't use: + // https://github.com/open-telemetry/opentelemetry-go-contrib/issues/5190 + otel.SetMeterProvider(noop.NewMeterProvider()) } const ( diff --git a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go index 82ef80162aeb..ea8d44fdbf5e 100644 --- a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go +++ b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go @@ -931,9 +931,9 @@ var Annotations = map[string]string{ "[sig-cli] Kubectl exec should be able to execute 1000 times in a container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from all pods based on default container": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from all pods based on default container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from each pod and each container in Deployment": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from each pod and each container in Deployment": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-cli] Kubectl logs default container logs the second container is the default-container by annotation should log default container if not specified": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -1143,101 +1143,101 @@ var Annotations = map[string]string{ "[sig-network] Netpol API should support creating NetworkPolicy API with endport field": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated namespace [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated namespace [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated pod [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated pod [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should deny egress from all pods in a namespace [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should deny egress from all pods in a namespace [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should deny egress from pods based on PodSelector 
[Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should deny egress from pods based on PodSelector [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]": " 
[Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector 
or NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol 
NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should stop enforcing policies after they 
are deleted [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol NetworkPolicy between server and client should work with Ingress, Egress specified together [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol NetworkPolicy between server and client should work with Ingress, Egress specified together [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy based on Ports [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy based on Ports [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": 
" [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy based on Ports [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy based on Ports [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", - "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Disabled:Broken] [Suite:k8s]", "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -2067,6 +2067,10 @@ var Annotations = map[string]string{ "[sig-node] [Feature:Example] Secret should create a pod that reads a secret": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-node] [Feature:GPUDevicePlugin] Sanity test for Nvidia Device should run gpu based matrix multiplication": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] [Feature:GPUDevicePlugin] Sanity test for Nvidia Device should run nvidia-smi cli": " [Disabled:SpecialConfig] [Suite:k8s]", + "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action ignore terminated container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action reduce GracePeriodSeconds during runtime": " [Suite:openshift/conformance/parallel] [Suite:k8s]", diff --git a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go index 575fa2a56f69..99441e26d651 100644 --- a/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go +++ b/vendor/k8s.io/kubernetes/openshift-hack/e2e/annotate/rules.go @@ -163,10 +163,6 @@ var ( // https://issues.redhat.com/browse/OCPBUGS-17194 `\[sig-node\] ImageCredentialProvider \[Feature:KubeletCredentialProviders\] should 
be able to create pod with image credentials fetched from external credential provider`, - - // https://issues.redhat.com/browse/OCPBUGS-38838 - `\[sig-cli\] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from all pods based on default container`, - `\[sig-cli\] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from each pod and each container in Deployment`, }, // tests that may work, but we don't support them "[Disabled:Unsupported]": { @@ -257,7 +253,7 @@ var ( `\[sig-storage\] Flexvolumes should be mountable`, `\[sig-storage\] Detaching volumes should not work when mount is in progress`, - // We are using openshift-sdn to conceal metadata + // We are using ovn-kubernetes to conceal metadata `\[sig-auth\] Metadata Concealment should run a check-metadata-concealment job to completion`, // https://bugzilla.redhat.com/show_bug.cgi?id=1740959 @@ -331,17 +327,6 @@ var ( `\[Feature:GKELocalSSD\]`, `\[Feature:GKENodePool\]`, }, - // Tests that don't pass under openshift-sdn. - // These are skipped explicitly by openshift-hack/test-kubernetes-e2e.sh, - // but will also be skipped by openshift-tests in jobs that use openshift-sdn. - "[Skipped:Network/OpenShiftSDN]": { - `NetworkPolicy.*IPBlock`, // feature is not supported by openshift-sdn - `NetworkPolicy.*[Ee]gress`, // feature is not supported by openshift-sdn - `NetworkPolicy.*named port`, // feature is not supported by openshift-sdn - - `NetworkPolicy between server and client should support a 'default-deny-all' policy`, // uses egress feature - `NetworkPolicy between server and client should stop enforcing policies after they are deleted`, // uses egress feature - }, // These tests are skipped when openshift-tests needs to use a proxy to reach the // cluster -- either because the test won't work while proxied, or because the test @@ -391,10 +376,6 @@ var ( `\[Feature:StorageProvider\]`, }, - // tests that don't pass under openshift-sdn multitenant mode - "[Skipped:Network/OpenShiftSDN/Multitenant]": { - `\[Feature:NetworkPolicy\]`, // not compatible with multitenant mode - }, // tests that don't pass under OVN Kubernetes "[Skipped:Network/OVNKubernetes]": { // ovn-kubernetes does not support named ports diff --git a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/route/hostassignment/admission.go b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/route/hostassignment/admission.go index f454bd22d564..19327fca63e4 100644 --- a/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/route/hostassignment/admission.go +++ b/vendor/k8s.io/kubernetes/openshift-kube-apiserver/admission/route/hostassignment/admission.go @@ -136,11 +136,6 @@ func (a *hostAssignment) Admit(ctx context.Context, attributes admission.Attribu return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) } - errs = hostassignment.ValidateHostExternalCertificate(ctx, r, old, a.sarClient, a.validationOpts) - if len(errs) > 0 { - return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) - } - errs = hostassignment.ValidateHostUpdate(ctx, r, old, a.sarClient, a.validationOpts) if len(errs) > 0 { return errors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errs) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go index 1310de57971b..8d9c2f05df28 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go @@ -303,6 +303,9 @@ func (m *cgroupCommon) toResources(resourceConfig *ResourceConfig) *libcontainer if resourceConfig.PidsLimit != nil { resources.PidsLimit = *resourceConfig.PidsLimit } + if !resourceConfig.CPUSet.IsEmpty() { + resources.CpusetCpus = resourceConfig.CPUSet.String() + } m.maybeSetHugetlb(resourceConfig, resources) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go index affaf97da8c4..23eb74b94aae 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -94,6 +94,10 @@ type Manager interface { // GetCPUAffinity returns cpuset which includes cpus from shared pools // as well as exclusively allocated cpus GetCPUAffinity(podUID, containerName string) cpuset.CPUSet + + // GetAllCPUs returns all the CPUs known by cpumanager, as reported by the + // hardware discovery. Maps to the CPU capacity. + GetAllCPUs() cpuset.CPUSet } type manager struct { @@ -137,7 +141,11 @@ type manager struct { // stateFileDirectory holds the directory where the state file for checkpoints is held. stateFileDirectory string - // allocatableCPUs is the set of online CPUs as reported by the system + // allCPUs is the set of online CPUs as reported by the system + allCPUs cpuset.CPUSet + + // allocatableCPUs is the set of online CPUs as reported by the system, + // and available for allocation, minus the reserved set allocatableCPUs cpuset.CPUSet // pendingAdmissionPod contain the pod during the admission phase @@ -157,6 +165,11 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc var policy Policy var err error + topo, err = topology.Discover(machineInfo) + if err != nil { + return nil, err + } + switch policyName(cpuPolicyName) { case PolicyNone: @@ -166,10 +179,6 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc } case PolicyStatic: - topo, err = topology.Discover(machineInfo) - if err != nil { - return nil, err - } klog.InfoS("Detected CPU topology", "topology", topo) reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU] @@ -206,6 +215,7 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc topology: topo, nodeAllocatableReservation: nodeAllocatableReservation, stateFileDirectory: stateFileDirectory, + allCPUs: topo.CPUDetails.CPUs(), } manager.sourcesReady = &sourcesReadyStub{} return manager, nil @@ -340,6 +350,10 @@ func (m *manager) GetAllocatableCPUs() cpuset.CPUSet { return m.allocatableCPUs.Clone() } +func (m *manager) GetAllCPUs() cpuset.CPUSet { + return m.allCPUs.Clone() +} + type reconciledContainer struct { podName string containerName string diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go index 4a03f3dd23ff..8f00ec3784b0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go @@ -85,6 +85,11 @@ func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet return cpuset.CPUSet{} } +func (m *fakeManager) GetAllCPUs() cpuset.CPUSet { + klog.InfoS("GetAllCPUs") + return cpuset.CPUSet{} +} + // NewFakeManager creates empty/fake cpu manager 
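The cgroup and cpumanager hunks above plumb a cpuset.CPUSet into libcontainer's CpusetCpus, which expects the kernel's list format. For reference, k8s.io/utils/cpuset renders exactly that (CPU values illustrative):

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	// CPUSet.String() emits the kernel list format that cpuset.cpus and
	// libcontainer's CpusetCpus expect.
	cs := cpuset.New(0, 1, 2, 3, 6)
	if !cs.IsEmpty() {
		fmt.Println("cpuset.cpus =", cs.String()) // 0-3,6
	}
}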
func NewFakeManager() Manager { return &fakeManager{ diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go index c9f013ad663d..51c14b8e8ff7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager_linux.go @@ -52,7 +52,7 @@ func (cm *containerManagerImpl) createNodeAllocatableCgroups() error { cgroupConfig := &CgroupConfig{ Name: cm.cgroupRoot, // The default limits for cpu shares can be very low which can lead to CPU starvation for pods. - ResourceParameters: getCgroupConfig(nodeAllocatable), + ResourceParameters: cm.getCgroupConfig(nodeAllocatable), } if cm.cgroupManager.Exists(cgroupConfig.Name) { return nil @@ -80,7 +80,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { cgroupConfig := &CgroupConfig{ Name: cm.cgroupRoot, - ResourceParameters: getCgroupConfig(nodeAllocatable), + ResourceParameters: cm.getCgroupConfig(nodeAllocatable), } // Using ObjectReference for events as the node maybe not cached; refer to #42701 for detail. @@ -109,7 +109,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { // Now apply kube reserved and system reserved limits if required. if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) { klog.V(2).InfoS("Enforcing system reserved on cgroup", "cgroupName", nc.SystemReservedCgroupName, "limits", nc.SystemReserved) - if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil { + if err := cm.enforceExistingCgroup(nc.SystemReservedCgroupName, nc.SystemReserved); err != nil { message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err) cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) return fmt.Errorf(message) @@ -118,7 +118,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { } if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) { klog.V(2).InfoS("Enforcing kube reserved on cgroup", "cgroupName", nc.KubeReservedCgroupName, "limits", nc.KubeReserved) - if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil { + if err := cm.enforceExistingCgroup(nc.KubeReservedCgroupName, nc.KubeReserved); err != nil { message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err) cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) return fmt.Errorf(message) @@ -129,8 +129,9 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { } // enforceExistingCgroup updates the limits `rl` on existing cgroup `cName` using `cgroupManager` interface. -func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.ResourceList) error { - rp := getCgroupConfig(rl) +func (cm *containerManagerImpl) enforceExistingCgroup(cNameStr string, rl v1.ResourceList) error { + cName := cm.cgroupManager.CgroupName(cNameStr) + rp := cm.getCgroupConfig(rl) if rp == nil { return fmt.Errorf("%q cgroup is not configured properly", cName) } @@ -151,17 +152,17 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1. 
ResourceParameters: rp, } klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CPUShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit) - if err := cgroupManager.Validate(cgroupConfig.Name); err != nil { + if err := cm.cgroupManager.Validate(cgroupConfig.Name); err != nil { return err } - if err := cgroupManager.Update(cgroupConfig); err != nil { + if err := cm.cgroupManager.Update(cgroupConfig); err != nil { return err } return nil } // getCgroupConfig returns a ResourceConfig object that can be used to create or update cgroups via CgroupManager interface. -func getCgroupConfig(rl v1.ResourceList) *ResourceConfig { +func (cm *containerManagerImpl) getCgroupConfig(rl v1.ResourceList) *ResourceConfig { // TODO(vishh): Set CPU Quota if necessary. if rl == nil { return nil @@ -183,6 +184,18 @@ func getCgroupConfig(rl v1.ResourceList) *ResourceConfig { } rc.HugePageLimit = HugePageLimits(rl) + // In the case of a None policy, cgroupv2 and systemd cgroup manager, we must make sure systemd is aware of the cpuset cgroup. + // By default, systemd will not create it, as we've not chosen to delegate it, and we haven't included it in the Apply() request. + // However, this causes a bug where kubelet restarts unnecessarily (cpuset cgroup is created in the cgroupfs, but systemd + // doesn't know about it and deletes it, and then kubelet doesn't continue because the cgroup isn't configured as expected). + // An alternative is to delegate the `cpuset` cgroup to the kubelet, but that would require some plumbing in libcontainer, + // and this is sufficient. + // Only do so on None policy, as Static policy will do its own updating of the cpuset. + // Please see the comment on policy none's GetAllocatableCPUs + if cm.cpuManager.GetAllocatableCPUs().IsEmpty() { + rc.CPUSet = cm.cpuManager.GetAllCPUs() + } + return &rc } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go index ee478f6021de..7ff0b26faa69 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/types.go @@ -19,12 +19,15 @@ package cm import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/cpuset" ) // ResourceConfig holds information about all the supported cgroup resource parameters. type ResourceConfig struct { // Memory limit (in bytes). Memory *int64 + // CPU set (number of cpus the cgroup has access to). + CPUSet cpuset.CPUSet // CPU shares (relative weight vs. other containers). CPUShares *uint64 // CPU hardcap limit (in usecs). Allowed cpu time in a given period. diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/validatingadmissionpolicy.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/validatingadmissionpolicy.go index e2fdb3ebe5db..5f4d0bbbea32 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/validatingadmissionpolicy.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/validatingadmissionpolicy.go @@ -366,10 +366,15 @@ var _ = SIGDescribe("ValidatingAdmissionPolicy [Privileged:ClusterAdmin]", func( // TODO(#123829) Remove once the schema watcher is merged. 
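The hunk that follows replaces the label-churning retry with a server-side apply that rewrites a spec field the type checker actually re-evaluates. In isolation, the new apply configuration looks like this (policy name and value are illustrative):

package main

import (
	"fmt"

	applyadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

func main() {
	// Same shape as the test's retry apply: a fixed audit-annotation key
	// whose valueExpression changes on every attempt.
	value := fmt.Sprintf(`"%d"`, 42)
	cfg := applyadmissionregistrationv1.ValidatingAdmissionPolicy("demo-policy").
		WithSpec(applyadmissionregistrationv1.ValidatingAdmissionPolicySpec().
			WithAuditAnnotations(applyadmissionregistrationv1.AuditAnnotation().
				WithKey("key").
				WithValueExpression(value)))
	fmt.Println(*cfg.Spec.AuditAnnotations[0].ValueExpression) // "42"
}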
diff --git a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/validatingadmissionpolicy.go b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/validatingadmissionpolicy.go
index e2fdb3ebe5db..5f4d0bbbea32 100644
--- a/vendor/k8s.io/kubernetes/test/e2e/apimachinery/validatingadmissionpolicy.go
+++ b/vendor/k8s.io/kubernetes/test/e2e/apimachinery/validatingadmissionpolicy.go
@@ -366,10 +366,15 @@ var _ = SIGDescribe("ValidatingAdmissionPolicy [Privileged:ClusterAdmin]", func(
 			// TODO(#123829) Remove once the schema watcher is merged.
 			// If the warnings are empty, touch the policy to retry type checking
 			if len(policy.Status.TypeChecking.ExpressionWarnings) == 0 {
-				applyConfig := applyadmissionregistrationv1.ValidatingAdmissionPolicy(policy.Name).WithLabels(map[string]string{
-					"touched": fmt.Sprintf("a%d", time.Now().UnixMilli()),
-					"random":  fmt.Sprintf("a%d", rand.Int()),
-				})
+				randomValue := fmt.Sprintf(`"%d"`, rand.Int())
+				// Use a fixed key and change only the value
+				applyConfig := applyadmissionregistrationv1.ValidatingAdmissionPolicy(policy.Name).
+					WithSpec(applyadmissionregistrationv1.ValidatingAdmissionPolicySpec().
+						WithAuditAnnotations(applyadmissionregistrationv1.AuditAnnotation().
+							WithKey("key").
+							WithValueExpression(randomValue),
+						),
+					)
 				_, err := client.AdmissionregistrationV1().ValidatingAdmissionPolicies().Apply(ctx, applyConfig, metav1.ApplyOptions{FieldManager: "validatingadmissionpolicy-e2e"})
 				return false, err
 			}
diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/gpu/.import-restrictions b/vendor/k8s.io/kubernetes/test/e2e/framework/gpu/.import-restrictions
new file mode 100644
index 000000000000..03b5ee5ec2c7
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/test/e2e/framework/gpu/.import-restrictions
@@ -0,0 +1,12 @@
+# This E2E framework sub-package is currently allowed to use arbitrary
+# dependencies except for k/k/pkg, therefore we need to override the
+# restrictions from the parent .import-restrictions file.
+#
+# At some point it may become useful to also check this package's
+# dependencies more carefully.
+rules:
+  - selectorRegexp: "^k8s[.]io/kubernetes/pkg"
+    allowedPrefixes: []
+
+  - selectorRegexp: ""
+    allowedPrefixes: [ "" ]
diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/gpu/gpu_util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/gpu/gpu_util.go
new file mode 100644
index 000000000000..fd9de240f4ad
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/test/e2e/framework/gpu/gpu_util.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gpu
+
+const (
+	// NVIDIAGPUResourceName is the extended name of the GPU resource since v1.8,
+	// which uses the device plugin mechanism.
+	NVIDIAGPUResourceName = "nvidia.com/gpu"
+
+	// GPUDevicePluginDSYAML is the manifest of the official Google device plugin DaemonSet for NVIDIA GPUs on GKE.
+	// TODO: Parametrize it by making it a feature in TestFramework,
+	// so we can override the daemonset in other setups (non COS).
+	GPUDevicePluginDSYAML = "https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml"
+)
diff --git a/vendor/k8s.io/kubernetes/test/e2e/node/gpu.go b/vendor/k8s.io/kubernetes/test/e2e/node/gpu.go
new file mode 100644
index 000000000000..81e2ae4b8d3c
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/test/e2e/node/gpu.go
@@ -0,0 +1,306 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+import (
+	"context"
+	"os"
+	"time"
+
+	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	clientset "k8s.io/client-go/kubernetes"
+	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/test/e2e/feature"
+	"k8s.io/kubernetes/test/e2e/framework"
+	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
+	e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu"
+	e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
+	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
+	admissionapi "k8s.io/pod-security-admission/api"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+)
+
+var _ = SIGDescribe(feature.GPUDevicePlugin, "Sanity test for Nvidia Device", func() {
+
+	f := framework.NewDefaultFramework("nvidia-gpu")
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
+	var podClient *e2epod.PodClient
+
+	ginkgo.BeforeEach(func() {
+		e2eskipper.SkipUnlessProviderIs("aws", "gce")
+		podClient = e2epod.NewPodClient(f)
+	})
+
+	f.It("should run nvidia-smi cli", func(ctx context.Context) {
+		SetupEnvironmentAndSkipIfNeeded(ctx, f, f.ClientSet)
+		pod := testNvidiaCLIPod()
+		pod.Spec.Containers[0].Command = []string{"nvidia-smi"}
+
+		ginkgo.By("Creating a pod that runs nvidia-smi")
+		createAndValidatePod(ctx, f, podClient, pod)
+
+		ginkgo.By("Getting logs from the pod")
+		log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Checking output from nvidia-smi")
+		gomega.Expect(log).To(gomega.ContainSubstring("NVIDIA-SMI"))
+		gomega.Expect(log).To(gomega.ContainSubstring("Driver Version:"))
+		gomega.Expect(log).To(gomega.ContainSubstring("CUDA Version:"))
+	})
+
+	f.It("should run gpu based matrix multiplication", func(ctx context.Context) {
+		SetupEnvironmentAndSkipIfNeeded(ctx, f, f.ClientSet)
+		pod := testMatrixMultiplicationPod()
+
+		ginkgo.By("Creating a pod that runs matrix multiplication")
+		createAndValidatePod(ctx, f, podClient, pod)
+
+		ginkgo.By("Getting logs from the pod")
+		log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Checking output from the matrix multiplication")
+		gomega.Expect(log).To(gomega.ContainSubstring("TensorFlow version"))
+		gomega.Expect(log).To(gomega.ContainSubstring("Matrix multiplication result:"))
+		gomega.Expect(log).To(gomega.ContainSubstring("Time taken for 5000x5000 matrix multiplication"))
+	})
+})
+
+func createAndValidatePod(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod *v1.Pod) {
+	pod = podClient.Create(ctx, pod)
+
+	ginkgo.By("Watching for error events or started pod")
+	ev, err := podClient.WaitForErrorEventOrSuccess(ctx, pod)
+	framework.ExpectNoError(err)
+	gomega.Expect(ev).To(gomega.BeNil())
+
+	ginkgo.By("Waiting for pod completion")
+	err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
+	framework.ExpectNoError(err)
+	pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+
+	ginkgo.By("Checking that the pod succeeded")
+	gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
+}
+
+func testNvidiaCLIPod() *v1.Pod {
+	podName := "gpu-cli-" + string(uuid.NewUUID())
+	pod := v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        podName,
+			Annotations: map[string]string{},
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "nvidia-smi",
+					Image: "nvidia/cuda:12.3.2-runtime-ubuntu22.04",
+					Resources: v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							"nvidia.com/gpu": resource.MustParse("1"),
+						},
+					},
+				},
+			},
+			RestartPolicy: v1.RestartPolicyNever,
+		},
+	}
+	return &pod
+}
+
+func testMatrixMultiplicationPod() *v1.Pod {
+	podName := "gpu-matmul-" + string(uuid.NewUUID())
+	pod := v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        podName,
+			Annotations: map[string]string{},
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "gpu-matmul",
+					Image: "tensorflow/tensorflow:latest-gpu",
+					Command: []string{
+						"python",
+						"-c",
+						`
+import tensorflow as tf
+import time
+
+print("TensorFlow version:", tf.__version__)
+print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
+
+# Simple matrix multiplication test
+with tf.device('/GPU:0'):
+    a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
+    b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
+    c = tf.matmul(a, b)
+
+print("Matrix multiplication result:", c.numpy())
+
+# Performance test
+n = 5000
+start_time = time.time()
+with tf.device('/GPU:0'):
+    matrix1 = tf.random.normal((n, n))
+    matrix2 = tf.random.normal((n, n))
+    result = tf.matmul(matrix1, matrix2)
+end_time = time.time()
+
+print(f"Time taken for {n}x{n} matrix multiplication: {end_time - start_time:.2f} seconds")
+`,
+					},
+					Resources: v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							"nvidia.com/gpu": resource.MustParse("1"),
+						},
+					},
+				},
+			},
+			RestartPolicy: v1.RestartPolicyNever,
+		},
+	}
+	return &pod
+}
+
+func SetupEnvironmentAndSkipIfNeeded(ctx context.Context, f *framework.Framework, clientSet clientset.Interface) {
+	if framework.ProviderIs("gce") {
+		rsgather := SetupNVIDIAGPUNode(ctx, f)
+		defer func() {
+			framework.Logf("Stopping ResourceUsageGather")
+			constraints := make(map[string]e2edebug.ResourceConstraint)
+			// For now, just get the summary. Valid constraints can be passed in the future.
+			summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints)
+			f.TestSummaries = append(f.TestSummaries, summary)
+			framework.ExpectNoError(err, "getting resource usage summary")
+		}()
+	}
+	nodes, err := e2enode.GetReadySchedulableNodes(ctx, clientSet)
+	framework.ExpectNoError(err)
+	capacity := 0
+	allocatable := 0
+	for _, node := range nodes.Items {
+		val, ok := node.Status.Capacity[e2egpu.NVIDIAGPUResourceName]
+		if !ok {
+			continue
+		}
+		capacity += int(val.Value())
+		val, ok = node.Status.Allocatable[e2egpu.NVIDIAGPUResourceName]
+		if !ok {
+			continue
+		}
+		allocatable += int(val.Value())
+	}
+	if capacity == 0 {
+		e2eskipper.Skipf("%d ready nodes do not have any Nvidia GPU(s). Skipping...", len(nodes.Items))
+	}
+	if allocatable == 0 {
+		e2eskipper.Skipf("%d ready nodes do not have any allocatable Nvidia GPU(s). Skipping...", len(nodes.Items))
+	}
+}
+
+func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, clientSet clientset.Interface) bool {
+	framework.Logf("Getting list of Nodes from API server")
+	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	framework.ExpectNoError(err, "getting node list")
+	for _, node := range nodeList.Items {
+		if node.Spec.Unschedulable {
+			continue
+		}
+		framework.Logf("gpuResourceName %s", e2egpu.NVIDIAGPUResourceName)
+		if val, ok := node.Status.Capacity[e2egpu.NVIDIAGPUResourceName]; !ok || val.Value() == 0 {
+			framework.Logf("Nvidia GPUs not available on Node: %q", node.Name)
+			return false
+		}
+	}
+	framework.Logf("Nvidia GPUs exist on all schedulable nodes")
+	return true
+}
+
+func logOSImages(ctx context.Context, f *framework.Framework) {
+	nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	framework.ExpectNoError(err, "getting node list")
+	for _, node := range nodeList.Items {
+		framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage)
+	}
+}
+
+const (
+	// Nvidia driver installation can take upwards of 5 minutes.
+	driverInstallTimeout = 10 * time.Minute
+)
+
+// SetupNVIDIAGPUNode installs Nvidia drivers and waits for Nvidia GPUs to be available on nodes.
+func SetupNVIDIAGPUNode(ctx context.Context, f *framework.Framework) *e2edebug.ContainerResourceGatherer {
+	logOSImages(ctx, f)
+
+	var err error
+	var ds *appsv1.DaemonSet
+	dsYamlURLFromEnv := os.Getenv("NVIDIA_DRIVER_INSTALLER_DAEMONSET")
+	if dsYamlURLFromEnv != "" {
+		// Using DaemonSet from remote URL
+		framework.Logf("Using remote nvidia-driver-installer daemonset manifest from %v", dsYamlURLFromEnv)
+		ds, err = e2emanifest.DaemonSetFromURL(ctx, dsYamlURLFromEnv)
+		framework.ExpectNoError(err, "failed to get remote daemonset manifest")
+	} else {
+		// Using default local DaemonSet
+		framework.Logf("Using default local nvidia-driver-installer daemonset manifest.")
+		data, err := e2etestfiles.Read("test/e2e/testing-manifests/scheduling/nvidia-driver-installer.yaml")
+		framework.ExpectNoError(err, "failed to read local manifest for nvidia-driver-installer daemonset")
+		ds, err = e2emanifest.DaemonSetFromData(data)
+		framework.ExpectNoError(err, "failed to parse local manifest for nvidia-driver-installer daemonset")
+	}
+	ds.Namespace = f.Namespace.Name
+	_, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ctx, ds, metav1.CreateOptions{})
+	framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
+	framework.Logf("Successfully created daemonset to install Nvidia drivers.")
+
+	pods, err := e2eresource.WaitForControlledPods(ctx, f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
+	framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset")
+
+	devicepluginPods, err := e2eresource.WaitForControlledPods(ctx, f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
+	if err == nil {
+		framework.Logf("Adding deviceplugin addon pod.")
+		pods.Items = append(pods.Items, devicepluginPods.Items...)
+	}
+
+	framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
+	rsgather, err := e2edebug.NewResourceUsageGatherer(ctx, f.ClientSet,
+		e2edebug.ResourceGathererOptions{InKubemark: false, Nodes: e2edebug.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods)
+	framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
+	go rsgather.StartGatheringData(ctx)
+
+	// Wait for Nvidia GPUs to be available on nodes
+	framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...")
+	gomega.Eventually(ctx, func(ctx context.Context) bool {
+		return areGPUsAvailableOnAllSchedulableNodes(ctx, f.ClientSet)
+	}, driverInstallTimeout, time.Second).Should(gomega.BeTrueBecause("expected GPU resources to be available within the timeout"))
+
+	return rsgather
+}
diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
index ce10f5c441e2..22de8316e85e 100644
--- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
+++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
@@ -229,7 +229,7 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
 	configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"}
 	configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
 	configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
-	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.5.7"}
+	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.5.9"}
 	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.15-0"}
 	configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
 	configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c297f31ebcb7..31959e9486c0 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -810,7 +810,7 @@ github.com/opencontainers/runtime-spec/specs-go
 github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
 github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/openshift/api v0.0.0-20241001152557-e415140e5d5f
+# github.com/openshift/api v0.0.0-20241109205306-a2817b89f7e0
 ## explicit; go 1.22.0
 github.com/openshift/api
 github.com/openshift/api/annotations
@@ -916,7 +916,7 @@ github.com/openshift/build-machinery-go/make/targets/golang
 github.com/openshift/build-machinery-go/make/targets/openshift
 github.com/openshift/build-machinery-go/make/targets/openshift/operator
 github.com/openshift/build-machinery-go/scripts
-# github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f
+# github.com/openshift/client-go v0.0.0-20241107164952-923091dd2b1a
 ## explicit; go 1.22.0
 github.com/openshift/client-go/apiserver/applyconfigurations/apiserver/v1
 github.com/openshift/client-go/apiserver/applyconfigurations/internal
@@ -1053,7 +1053,7 @@ github.com/openshift/client-go/user/listers/user/v1
 # github.com/openshift/cluster-network-operator v0.0.0-20240708200319-1cd8678b38fb
 ## explicit; go 1.22.0
 github.com/openshift/cluster-network-operator/pkg/names
-# github.com/openshift/library-go v0.0.0-20241015130640-f9ecd211c68b
+# github.com/openshift/library-go v0.0.0-20241107160307-0064ad7bd060
 ## explicit; go 1.22.0
 github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig
 github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout
@@ -1531,7 +1531,7 @@ golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.21.0
+# golang.org/x/oauth2 v0.22.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
@@ -1626,17 +1626,17 @@ gonum.org/v1/plot/vg/vgtex
 # google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d
 ## explicit; go 1.19
 google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094
-## explicit; go 1.20
+# google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f
+## explicit; go 1.21
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/expr/v1alpha1
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f
-## explicit; go 1.20
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61
+## explicit; go 1.21
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.65.0
+# google.golang.org/grpc v1.67.0
 ## explicit; go 1.21
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -1655,7 +1655,9 @@ google.golang.org/grpc/credentials/insecure
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/gzip
 google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
 google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
 google.golang.org/grpc/health
 google.golang.org/grpc/health/grpc_health_v1
 google.golang.org/grpc/internal
@@ -1679,6 +1681,7 @@ google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
 google.golang.org/grpc/internal/resolver/unix
 google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
 google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
@@ -1686,6 +1689,7 @@ google.golang.org/grpc/internal/transport/networktype
 google.golang.org/grpc/interop/grpc_testing
 google.golang.org/grpc/interop/grpc_testing/core
 google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
 google.golang.org/grpc/resolver
@@ -2769,7 +2773,7 @@ k8s.io/kubelet/pkg/cri/streaming
 k8s.io/kubelet/pkg/cri/streaming/portforward
 k8s.io/kubelet/pkg/cri/streaming/remotecommand
 k8s.io/kubelet/pkg/types
-# k8s.io/kubernetes v1.31.1 => github.com/openshift/kubernetes v1.30.1-0.20241002124647-1892e4deb967
+# k8s.io/kubernetes v1.31.1 => github.com/openshift/kubernetes v1.30.1-0.20241111135607-a3f31a4f2c58
 ## explicit; go 1.22.0
 k8s.io/kubernetes/cmd/kube-apiserver/app
 k8s.io/kubernetes/cmd/kube-apiserver/app/options
@@ -3495,6 +3499,7 @@ k8s.io/kubernetes/test/e2e/framework/deployment
 k8s.io/kubernetes/test/e2e/framework/endpoints
 k8s.io/kubernetes/test/e2e/framework/endpointslice
 k8s.io/kubernetes/test/e2e/framework/events
+k8s.io/kubernetes/test/e2e/framework/gpu
 k8s.io/kubernetes/test/e2e/framework/internal/junit
 k8s.io/kubernetes/test/e2e/framework/job
 k8s.io/kubernetes/test/e2e/framework/kubectl
@@ -3867,7 +3872,7 @@ sigs.k8s.io/yaml/goyaml.v3
 # k8s.io/kube-scheduler => github.com/openshift/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20241002124647-1892e4deb967
 # k8s.io/kubectl => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20241002124647-1892e4deb967
 # k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20241002124647-1892e4deb967
-# k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20241002124647-1892e4deb967
+# k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20241111135607-a3f31a4f2c58
 # k8s.io/metrics => github.com/openshift/kubernetes/staging/src/k8s.io/metrics v0.0.0-20241002124647-1892e4deb967
 # k8s.io/mount-utils => github.com/openshift/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20241002124647-1892e4deb967
 # k8s.io/pod-security-admission => github.com/openshift/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20241002124647-1892e4deb967
diff --git a/zz_generated.manifests/test-reporting.yaml b/zz_generated.manifests/test-reporting.yaml
index a0becec6225c..7a5d2ff7ef84 100644
--- a/zz_generated.manifests/test-reporting.yaml
+++ b/zz_generated.manifests/test-reporting.yaml
@@ -244,6 +244,12 @@ spec:
       - testName: '[sig-network][OCPFeatureGate:NetworkSegmentation][Feature:UserDefinedPrimaryNetworks]
           when using openshift ovn-kubernetes when primary network exist, UserDefinedNetwork
           status should report not-ready'
+  - featureGate: NewOLM
+    tests:
+    - testName: '[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 CRDs should be installed'
+    - testName: '[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 Catalogs should be installed'
+    - testName: '[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 operator installation should
+        install a cluster extension'
   - featureGate: PersistentIPsForVirtualization
     tests:
     - testName: '[sig-network][OCPFeatureGate:PersistentIPsForVirtualization][Feature:Layer2LiveMigration]